linux_kernel/drivers/dma/mv_xor.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */
#ifndef MV_XOR_H
#define MV_XOR_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#define MV_XOR_POOL_SIZE (MV_XOR_SLOT_SIZE * 3072)
#define MV_XOR_SLOT_SIZE 64
#define MV_XOR_THRESHOLD 1
#define MV_XOR_MAX_CHANNELS 2
#define MV_XOR_MIN_BYTE_COUNT SZ_128
#define MV_XOR_MAX_BYTE_COUNT (SZ_16M - 1)
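/*
 * With the sizes above, the descriptor pool holds
 * MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE = 3072 hardware descriptor slots
 * of 64 bytes each (192 KiB per channel pool).
 */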
/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_OPERATION_MODE_IN_DESC 7
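/*
 * In XOR_OPERATION_MODE_IN_DESC the channel does not carry one fixed
 * operation in XOR_CONFIG; each hardware descriptor selects its own
 * operation through the XOR_DESC_OPERATION_* bits of its command word below.
 */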
#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_DESC_SUCCESS 0x40000000
#define XOR_DESC_OPERATION_XOR (0 << 24)
#define XOR_DESC_OPERATION_CRC32C (1 << 24)
#define XOR_DESC_OPERATION_MEMCPY (2 << 24)
#define XOR_DESC_DMA_OWNED BIT(31)
#define XOR_DESC_EOD_INT_EN BIT(31)
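/*
 * XOR_DESC_DMA_OWNED and XOR_DESC_EOD_INT_EN share bit 31 but apply to
 * different words of struct mv_xor_desc: ownership is flagged in the
 * status word, the end-of-descriptor interrupt enable in the desc_command
 * word, so the two definitions do not conflict.
 */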
#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4)
#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
#define XOR_INT_END_OF_DESC BIT(0)
#define XOR_INT_END_OF_CHAIN BIT(1)
#define XOR_INT_STOPPED BIT(2)
#define XOR_INT_PAUSED BIT(3)
#define XOR_INT_ERR_DECODE BIT(4)
#define XOR_INT_ERR_RDPROT BIT(5)
#define XOR_INT_ERR_WRPROT BIT(6)
#define XOR_INT_ERR_OWN BIT(7)
#define XOR_INT_ERR_PAR BIT(8)
#define XOR_INT_ERR_MBUS BIT(9)
#define XOR_INTR_ERRORS         (XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
                                 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN | \
                                 XOR_INT_ERR_PAR | XOR_INT_ERR_MBUS)
#define XOR_INTR_MASK_VALUE     (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
                                 XOR_INT_STOPPED | XOR_INTR_ERRORS)
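/*
 * Illustrative sketch of how the per-channel register macros and the
 * interrupt bits above are meant to be combined.  The helper names are
 * invented for the example, and the shift by (chan->idx * 16) is an
 * assumption that the shared cause/mask registers give each of the two
 * channels its own 16-bit field:
 *
 *	static void example_unmask_interrupts(struct mv_xor_chan *chan)
 *	{
 *		u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 *
 *		val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
 *		writel_relaxed(val, XOR_INTR_MASK(chan));
 *	}
 *
 *	static u32 example_get_intr_cause(struct mv_xor_chan *chan)
 *	{
 *		return readl_relaxed(XOR_INTR_CAUSE(chan)) >> (chan->idx * 16);
 *	}
 */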
#define WINDOW_BASE(w) (0x50 + ((w) << 2))
#define WINDOW_SIZE(w) (0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2))
#define WINDOW_COUNT 8
struct mv_xor_device {
        void __iomem            *xor_base;
        void __iomem            *xor_high_base;
        struct clk              *clk;
        struct mv_xor_chan      *channels[MV_XOR_MAX_CHANNELS];
        int                     xor_type;

        u32                     win_start[WINDOW_COUNT];
        u32                     win_end[WINDOW_COUNT];
};
/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @mmr_high_base: memory mapped base of the high register bank
 * @idx: the index of the xor channel
 * @chain: device chain view of the descriptors
 * @free_slots: free slots usable by the channel
 * @allocated_slots: slots allocated by the driver
 * @completed_slots: slots completed by HW but that still need to be acked
 * @dmadev: dmaengine device object
 * @dmachan: dmaengine channel object
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 * @op_in_desc: new driver mode in which each operation is written into its descriptor
 * @xordev: pointer to the parent mv_xor_device
 */
struct mv_xor_chan {
        int                     pending;
        spinlock_t              lock; /* protects the descriptor slot pool */
        void __iomem            *mmr_base;
        void __iomem            *mmr_high_base;
        unsigned int            idx;
        int                     irq;
        struct list_head        chain;
        struct list_head        free_slots;
        struct list_head        allocated_slots;
        struct list_head        completed_slots;
        dma_addr_t              dma_desc_pool;
        void                    *dma_desc_pool_virt;
        size_t                  pool_size;
        struct dma_device       dmadev;
        struct dma_chan         dmachan;
        int                     slots_allocated;
        struct tasklet_struct   irq_tasklet;
        int                     op_in_desc;
        char                    dummy_src[MV_XOR_MIN_BYTE_COUNT];
        char                    dummy_dst[MV_XOR_MIN_BYTE_COUNT];
        dma_addr_t              dummy_src_addr, dummy_dst_addr;
        u32                     saved_config_reg, saved_int_mask_reg;

        struct mv_xor_device    *xordev;
};
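/*
 * Client code only sees the embedded struct dma_chan; a driver-side helper
 * (name assumed here for illustration) recovers the wrapping mv_xor_chan
 * with container_of():
 *
 *	static inline struct mv_xor_chan *example_to_mv_xor_chan(struct dma_chan *c)
 *	{
 *		return container_of(c, struct mv_xor_chan, dmachan);
 *	}
 */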
/**
 * struct mv_xor_desc_slot - software descriptor
 * @node: node on the mv_xor_chan lists
 * @sg_tx_list: list of slots that make up a multi-descriptor transaction
 * @type: dmaengine transaction type of this slot
 * @hw_desc: virtual address of the hardware descriptor chain
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
        struct list_head        node;
        struct list_head        sg_tx_list;
        enum dma_transaction_type       type;
        void                    *hw_desc;
        u16                     idx;
        struct dma_async_tx_descriptor  async_tx;
};
/*
 * This structure describes the 64-byte XOR hardware descriptor.  The
 * mv_phy_src_idx() macro must be used when indexing into the
 * phy_src_addr[] array, because the 'descriptor swap' feature, used on
 * big-endian systems, swaps descriptor data within blocks of 8 bytes.
 * Two consecutive entries of phy_src_addr[] are therefore swapped on
 * big-endian, which explains the different mv_phy_src_idx()
 * implementations below.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
        u32 status;             /* descriptor execution status */
        u32 crc32_result;       /* result of CRC-32 calculation */
        u32 desc_command;       /* type of operation to be carried out */
        u32 phy_next_desc;      /* next descriptor address pointer */
        u32 byte_count;         /* size of src/dst blocks in bytes */
        u32 phy_dest_addr;      /* destination block address */
        u32 phy_src_addr[8];    /* source block addresses */
        u32 reserved0;
        u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
        u32 crc32_result;       /* result of CRC-32 calculation */
        u32 status;             /* descriptor execution status */
        u32 phy_next_desc;      /* next descriptor address pointer */
        u32 desc_command;       /* type of operation to be carried out */
        u32 phy_dest_addr;      /* destination block address */
        u32 byte_count;         /* size of src/dst blocks in bytes */
        u32 phy_src_addr[8];    /* source block addresses */
        u32 reserved1;
        u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif
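/*
 * Illustrative use of mv_phy_src_idx(): source addresses must always be
 * stored through the macro so that the same code works with the big-endian
 * 8-byte descriptor swap.  The helper name is assumed for the example:
 *
 *	static void example_set_src_addr(struct mv_xor_desc *hw_desc,
 *					 int index, u32 addr)
 *	{
 *		hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
 *	}
 */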
#define to_mv_sw_desc(addr_hw_desc)             \
        container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)       \
        ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
#endif