diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index b475ba84b8967..45e090702759b 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -175,3 +175,10 @@ CONFIG_DETECT_HUNG_TASK=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_FUNCTION_TRACER=y
 CONFIG_DEBUG_USER=y
+CONFIG_MATHWORKS_IP_CORE=y
+CONFIG_MATHWORKS_GENERIC_OF=y
+CONFIG_MWIPCORE=y
+CONFIG_MWIPCORE_DMA_STREAMING=y
+CONFIG_MWIPCORE_IIO_STREAMING=y
+CONFIG_MWIPCORE_IIO_MM=y
+CONFIG_MWIPCORE_IIO_SHAREDMEM=y
diff --git a/drivers/misc/mathworks/Kconfig b/drivers/misc/mathworks/Kconfig
new file mode 100755
index 0000000000000..0c17dd3c7421a
--- /dev/null
+++ b/drivers/misc/mathworks/Kconfig
@@ -0,0 +1,110 @@
+#
+# mwgeneric driver configuration
+#
+
+menu "MathWorks IP Drivers"
+
+config MATHWORKS_IP_CORE
+	tristate "MathWorks IP Drivers"
+	depends on OF || PCI
+	default m
+	help
+	  Say Y here if you want to add support for the MathWorks IP
+	  drivers.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mathworks_ip_common.
+
+if MATHWORKS_IP_CORE
+
+config MWIPCORE
+	tristate "MathWorks IP Core Driver"
+	depends on OF
+	default m
+	help
+	  Say Y here if you want to add support for the MathWorks IP
+	  core driver.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mwipcore.
+
+if MWIPCORE
+
+config MWIPCORE_DMA_STREAMING
+	tristate "MathWorks DMA Streaming Driver"
+	depends on DMADEVICES
+	default m
+	help
+	  Enable support for the MathWorks raw DMA-based implementation
+	  of streaming DMA.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mwipcore_dma_streaming.
+
+config MWIPCORE_IIO_STREAMING
+	tristate "MathWorks IIO Streaming Driver"
+	depends on DMADEVICES
+	depends on IIO
+	select IIO_BUFFER
+	select IIO_BUFFER_DMAENGINE
+	default m
+	help
+	  Enable support for the MathWorks IIO-based implementation of
+	  streaming DMA.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mwipcore_iio_streaming.
+
+config MWIPCORE_IIO_MM
+	tristate "MathWorks IIO Memory Mapped Driver"
+	depends on DMADEVICES
+	depends on IIO
+	select IIO_BUFFER
+	select IIO_BUFFER_DMAENGINE
+	default m
+	help
+	  Enable support for the MathWorks IIO-based implementation of
+	  memory-mapped IO and DMA.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mwipcore_iio_mm.
+
+config MWIPCORE_IIO_SHAREDMEM
+	tristate "MathWorks IIO Shared Memory Driver"
+	depends on DMADEVICES
+	depends on IIO
+	select IIO_BUFFER
+	select IIO_BUFFER_DMAENGINE
+	default m
+	help
+	  Enable support for the MathWorks IIO-based implementation of a
+	  shared, random-access memory region.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mwipcore_iio_sharedmem.
+
+endif # MWIPCORE
+
+config MATHWORKS_GENERIC_OF
+	tristate "MathWorks Generic OF Driver"
+	depends on OF_ADDRESS && OF_IRQ
+	default m
+	help
+	  Say Y here if you want to add support for the MathWorks
+	  Generic OF driver.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called mathworks_generic_of.
+
+endif # MATHWORKS_IP_CORE
+endmenu
+
diff --git a/drivers/misc/mathworks/Makefile b/drivers/misc/mathworks/Makefile
new file mode 100755
index 0000000000000..2f7248c2c7034
--- /dev/null
+++ b/drivers/misc/mathworks/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the AXI4 drivers.
+#
+
+obj-$(CONFIG_MATHWORKS_IP_CORE) += mathworks_ip_common.o
+obj-$(CONFIG_MATHWORKS_GENERIC_OF) += mathworks_generic_of.o
+
+obj-$(CONFIG_MWIPCORE) += mwipcore.o
+obj-$(CONFIG_MWIPCORE_DMA_STREAMING) += mwipcore_dma_streaming.o
+obj-$(CONFIG_MWIPCORE_IIO_STREAMING) += mwipcore_iio_streaming.o
+obj-$(CONFIG_MWIPCORE_IIO_MM) += mwipcore_iio_mm.o
+obj-$(CONFIG_MWIPCORE_IIO_SHAREDMEM) += mwipcore_iio_sharedmem.o
+
+mwipcore_dma_streaming-objs := mw_stream_channel.o
+mwipcore_iio_streaming-objs := mw_stream_iio_channel.o
+mwipcore_iio_mm-objs := mw_mm_iio_channel.o
+mwipcore_iio_sharedmem-objs := mw_sharedmem_iio_channel.o
+mwipcore-objs := mathworks_ipcore.o
diff --git a/drivers/misc/mathworks/mathworks_generic_of.c b/drivers/misc/mathworks/mathworks_generic_of.c
new file mode 100755
index 0000000000000..4f3bc8c743837
--- /dev/null
+++ b/drivers/misc/mathworks/mathworks_generic_of.c
@@ -0,0 +1,185 @@
+/*
+ * MathWorks IP Generic OF Driver
+ *
+ * Copyright 2013-2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/i2c.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+
+#include <linux/mathworks/mathworks_ip.h>
+
+#define ip_to_pdev(x) (container_of(x->dev, struct platform_device, dev))
+
+#define DRIVER_NAME "mathworks_generic_of"
+
+static void mwgen_of_unlink_i2c_device(struct mathworks_ip_info *thisIpcore)
+{
+	sysfs_remove_link(&thisIpcore->char_device->kobj, "i2c_device");
+}
+
+static void mwgen_of_unlink_i2c_adapter(struct mathworks_ip_info *thisIpcore)
+{
+	sysfs_remove_link(&thisIpcore->char_device->kobj, "i2c_adapter");
+}
+
+static int mathworks_generic_of_i2c_init(struct mathworks_ip_info *thisIpcore)
+{
+	struct device_node *nodePointer = thisIpcore->dev->of_node;
+	struct device_node *slave_node;
+	int status;
+
+	slave_node = of_parse_phandle(nodePointer, "i2c-controller", 0);
+	if (slave_node) {
+		status = devm_add_action_helper(thisIpcore->dev, (devm_action_fn)of_node_put, slave_node);
+		if (status)
+			return status;
+
+		dev_info(thisIpcore->dev, "creating i2c link\n");
+
+		thisIpcore->i2c = of_find_i2c_device_by_node(slave_node);
+		if (thisIpcore->i2c == NULL) {
+			dev_err(thisIpcore->dev, "could not find i2c device\n");
+			return -ENODEV;
+		}
+		status = devm_add_action_helper(thisIpcore->dev, (devm_action_fn)put_device, &thisIpcore->i2c->dev);
+		if (status)
+			return status;
+
+		dev_info(thisIpcore->dev, "Adding link to %s[%s]\n", thisIpcore->i2c->adapter->name, thisIpcore->i2c->name);
+
+		/* add a link to the i2c device */
+		status = sysfs_create_link(&thisIpcore->char_device->kobj, &thisIpcore->i2c->dev.kobj, "i2c_device");
+		if (status)
+			return status;
+		status = devm_add_action_helper(thisIpcore->dev, (devm_action_fn)mwgen_of_unlink_i2c_device, thisIpcore);
+		if (status)
+			return status;
+
+		/* add a link to the i2c bus */
+		status = sysfs_create_link(&thisIpcore->char_device->kobj, &thisIpcore->i2c->adapter->dev.kobj, "i2c_adapter");
+		if (status)
+			return status;
+		status = devm_add_action_helper(thisIpcore->dev, (devm_action_fn)mwgen_of_unlink_i2c_adapter, thisIpcore);
+		if (status)
+			return status;
+	} else {
+		thisIpcore->i2c = NULL;
+	}
+
+	return 0;
+}
+
+static int mathworks_generic_of_get_param(struct mathworks_ip_info *thisIpcore, void *arg)
+{
+	struct mathworks_ip_param_info pinfo;
+	const void *paramData;
+	int len = 0;	/* initialize: of_get_property() only sets len on success */
+
+	if (copy_from_user(&pinfo, (struct mathworks_ip_param_info *)arg, sizeof(struct mathworks_ip_param_info)))
+		return -EACCES;
+
+	paramData = of_get_property(thisIpcore->dev->of_node, pinfo.name, &len);
+	pinfo.size = len;
+
+	/* Copy the struct back to user space */
+	if (copy_to_user((struct mathworks_ip_param_info *)arg, &pinfo, sizeof(struct mathworks_ip_param_info)))
+		return -EACCES;
+
+	/* Copy any data to the user buf */
+	if (paramData) {
+		if (copy_to_user((void *)pinfo.buf, paramData, pinfo.size))
+			return -EACCES;
+	} else {
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void mathworks_generic_of_get_devname(struct mathworks_ip_info *thisIpcore, char *devname)
+{
+	const char *of_devname = of_get_property(thisIpcore->dev->of_node, "mwgen,devname", NULL);
+
+	/* Fall back to the node name if there is no "mwgen,devname" property */
+	if (!of_devname)
+		of_devname = thisIpcore->name;
+	snprintf(devname, MATHWORKS_IP_DEVNAME_LEN, "%s", of_devname);
+}
+
+struct mathworks_ip_ops mw_of_ops = {
+	.get_devname = mathworks_generic_of_get_devname,
+	.get_param = mathworks_generic_of_get_param,
+	.fops = &mathworks_ip_common_fops,
+};
+
+static const struct of_device_id mathworks_generic_of_match[] = {
+	{ .compatible = "mathworks,mwgeneric-v1.00", },
+	{ .compatible = "mathworks,mathworks_ip-v1.00", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mathworks_generic_of_match);
+
+static int mathworks_generic_of_probe(struct platform_device *pdev)
+{
+	int status = 0;
+	struct mathworks_ip_info *thisIpcore;
+
+	thisIpcore = devm_mathworks_ip_of_init(pdev, THIS_MODULE, &mw_of_ops, false);
+	if (IS_ERR(thisIpcore))
+		return PTR_ERR(thisIpcore);
+
+	status = devm_mathworks_ip_register(thisIpcore);
+	if (status) {
+		dev_err(&pdev->dev, "mwgeneric device registration failed: %d\n", status);
+		return status;
+	}
+
+#if defined(CONFIG_I2C)
+	status = mathworks_generic_of_i2c_init(thisIpcore);
+	if (status) {
+		dev_err(&pdev->dev, "Failed to link I2C nodes: %d\n", status);
+		return status;
+	}
+#endif
+
+	return 0;
+}
+
+static int mathworks_generic_of_remove(struct platform_device *pdev)
+{
+	struct mathworks_ip_info *thisIpcore = dev_get_drvdata(&pdev->dev);
+
+	dev_info(thisIpcore->dev, "free and release memory\n");
+
+	return 0;
+}
+
+static struct platform_driver mathworks_ip_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mathworks_generic_of_match,
+	},
+	.probe = mathworks_generic_of_probe,
+	.remove = mathworks_generic_of_remove,
+};
+
+module_platform_driver(mathworks_ip_driver);
+
+MODULE_AUTHOR("MathWorks, Inc");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_NAME ": MathWorks Generic OF driver");
+MODULE_ALIAS(DRIVER_NAME);
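For reference, the MATHWORKS_IP_GET_PARAM ioctl above copies an arbitrary device-tree property of the IP node out to user space. A minimal user-space sketch of that flow follows; the UAPI header path, the field types of struct mathworks_ip_param_info, and the /dev node name are assumptions for illustration (see mathworks_ip.h in this series for the real definitions), not part of the patch:

/* Illustrative only: query a DT property through MATHWORKS_IP_GET_PARAM.
 * Header path, field layout, and device node name are assumptions. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mathworks/mathworks_ip.h>	/* assumed UAPI header */

int main(void)
{
	struct mathworks_ip_param_info pinfo = { 0 };
	char buf[64] = { 0 };
	int fd = open("/dev/mwipcore0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;
	pinfo.name = "mwgen,devname";	/* any property of the IP node */
	pinfo.buf = buf;			/* driver copies property data here */
	if (ioctl(fd, MATHWORKS_IP_GET_PARAM, &pinfo) == 0)
		printf("property size %zu: %s\n", (size_t)pinfo.size, buf);
	close(fd);
	return 0;
}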
diff --git a/drivers/misc/mathworks/mathworks_ip_common.c b/drivers/misc/mathworks/mathworks_ip_common.c
new file mode 100755
index 0000000000000..aec790098ba38
--- /dev/null
+++ b/drivers/misc/mathworks/mathworks_ip_common.c
@@ -0,0 +1,585 @@
+/*
+ * MathWorks IP Common Functionality
+ *
+ * Copyright 2013-2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/sysfs.h>
+
+#include <linux/mathworks/mathworks_ip.h>
+
+#define DRIVER_NAME "mathworks_ip"
+
+/* DMA read/write for use with HDL Verifier BRAM */
+#define HDLV_BRAM_BASE_ADDR 262144
+
+/* Device structure for IP core information */
+static struct class *mathworks_ip_class = NULL;
+static struct mathworks_ip_dev_info dev_table[MATHWORKS_IP_MAX_DEVTYPE] = {{{0}}};
+
+/*
+ * setup for creating sysfs directory
+ */
+static ssize_t show_fpga_irq(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "status=0\n");
+}
+
+static ssize_t store_fpga_irq(struct device *dev, struct device_attribute *attr, const char *buf, size_t len)
+{
+	return len;
+}
+
+static DEVICE_ATTR(fpga_irq_0, S_IRWXU, show_fpga_irq, store_fpga_irq);
+
+static irqreturn_t mathworks_ip_intr_handler(int irq, void *theIpcore)
+{
+	struct mathworks_ip_info *thisIpcore = (struct mathworks_ip_info *)theIpcore;
+
+	dev_dbg(thisIpcore->dev, "IRQ %d Handled\n", irq);
+	/* irq is the current Linux INT number - not equal to the INT pin on the processor */
+	/* thisIpcore->irq is the starting Linux INT number for that DUT */
+	/* The difference irq - thisIpcore->irq is the relative INT number as described above */
+	/* relativeIntIndex = irq - thisIpcore->irq; currently supporting one interrupt per DUT */
+	sysfs_notify_dirent(thisIpcore->irq_kn[0]);
+	return IRQ_HANDLED;
+}
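The interrupt handler above does not move any data itself; it only calls sysfs_notify_dirent() on the fpga_irq_0 attribute created further down, so user space can block waiting for POLLPRI on that file. A hedged user-space sketch (the sysfs path is an example; the attribute lives under the platform device's sysfs directory):

/* Illustrative only: block until the IP core raises an interrupt by
 * poll()ing the fpga_irq_0 sysfs attribute notified by the handler. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char dummy[16];
	struct pollfd pfd;
	int fd = open("/sys/devices/.../fpga_irq_0", O_RDONLY); /* example path */

	if (fd < 0)
		return 1;
	read(fd, dummy, sizeof(dummy));		/* arm: consume the current value */
	pfd.fd = fd;
	pfd.events = POLLPRI | POLLERR;
	if (poll(&pfd, 1, -1) > 0) {		/* wakes on sysfs_notify_dirent() */
		lseek(fd, 0, SEEK_SET);
		read(fd, dummy, sizeof(dummy));	/* re-read after the event */
		printf("got IRQ notification\n");
	}
	close(fd);
	return 0;
}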
+
+static int mathworks_ip_fasync_impl(int fd, struct file *fp, int mode)
+{
+	struct mathworks_ip_info *thisIpcore = fp->private_data;
+
+	return fasync_helper(fd, fp, mode, &thisIpcore->asyncq);
+}
+
+static int mathworks_ip_open(struct inode *inode, struct file *fp)
+{
+	struct mathworks_ip_info *thisIpcore;
+
+	thisIpcore = container_of(inode->i_cdev, struct mathworks_ip_info, cdev);
+	fp->private_data = thisIpcore;
+
+	return 0;
+}
+
+static int mathworks_ip_close(struct inode *inode, struct file *fp)
+{
+	mathworks_ip_fasync_impl(-1, fp, 0);
+	return 0;
+}
+
+static int mathworks_ip_dma_alloc(struct mathworks_ip_info *thisIpcore, size_t size)
+{
+	struct mw_dma_info *dinfo = &thisIpcore->dma_info;
+
+	if (dinfo->size != 0) {
+		dev_err(thisIpcore->dev, "DMA memory already allocated\n");
+		return -EEXIST;
+	}
+
+	dinfo->virt = dmam_alloc_coherent(thisIpcore->dev, size,
+			&dinfo->phys, GFP_KERNEL);
+	if (!dinfo->virt) {
+		dev_err(thisIpcore->dev, "failed to allocate DMA memory\n");
+		return -ENOMEM;
+	}
+	dinfo->size = size;
+
+	return 0;
+}
+
+static int mathworks_ip_dma_info(struct mathworks_ip_info *thisIpcore, void *arg)
+{
+	struct mathworks_ip_dma_info dinfo;
+
+	/* Copy the struct from user space */
+	if (copy_from_user(&dinfo, (struct mathworks_ip_dma_info *)arg, sizeof(struct mathworks_ip_dma_info)))
+		return -EACCES;
+
+	/* Populate the struct with information */
+	dinfo.size = thisIpcore->dma_info.size;
+	dinfo.phys = (void *)((uintptr_t)thisIpcore->dma_info.phys);
+
+	/* Copy the struct back to user space */
+	if (copy_to_user((struct mathworks_ip_dma_info *)arg, &dinfo, sizeof(struct mathworks_ip_dma_info)))
+		return -EACCES;
+
+	return 0;
+}
+
+static int mathworks_ip_reg_info(struct mathworks_ip_info *thisIpcore, void *arg)
+{
+	struct mathworks_ip_reg_info rinfo;
+
+	/* Copy the struct from user space */
+	if (copy_from_user(&rinfo, (struct mathworks_ip_reg_info *)arg, sizeof(struct mathworks_ip_reg_info)))
+		return -EACCES;
+
+	/* Populate the struct with information */
+	rinfo.size = resource_size(thisIpcore->mem);
+	rinfo.phys = (void *)((uintptr_t)thisIpcore->mem->start);
+
+	/* Copy the struct back to user space */
+	if (copy_to_user((struct mathworks_ip_reg_info *)arg, &rinfo, sizeof(struct mathworks_ip_reg_info)))
+		return -EACCES;
+
+	return 0;
+}
+
+static int mathworks_ip_get_devinfo(struct mathworks_ip_info *thisIpcore)
+{
+	int i, devname_len, status;
+	char devname[MATHWORKS_IP_DEVNAME_LEN];
+	char *tgtDevname;
+	struct mathworks_ip_dev_info *thisDev;
+
+	thisIpcore->ops->get_devname(thisIpcore, devname);
+	devname_len = strlen(devname);
+	for (i = 0; i < MATHWORKS_IP_MAX_DEVTYPE; i++) {
+		/* Search for the device in the table */
+		thisDev = &dev_table[i];
+		tgtDevname = thisDev->devname;
+		if (*tgtDevname == 0) {
+			dev_info(thisIpcore->dev, "'%s' device not found, creating\n", devname);
+			break;
+		}
+		if (strncasecmp(tgtDevname, devname, devname_len) == 0) {
+			dev_info(thisIpcore->dev, "'%s' device found, adding\n", devname);
+			thisIpcore->dev_info = thisDev;
+			return 0;
+		}
+	}
+	if ((*tgtDevname == 0) && i < MATHWORKS_IP_MAX_DEVTYPE) {
+		/* Add a new device to the table */
+		strncpy(tgtDevname, devname, devname_len);
+
+		status = alloc_chrdev_region(&thisDev->devid, 0, MATHWORKS_IP_MAX_DEVTYPE, devname);
+		if (status) {
+			dev_err(thisIpcore->dev, "Character dev. region not allocated: %d\n", status);
+			return status;
+		}
+		dev_info(thisIpcore->dev, "Char dev region registered: major num:%d\n", MAJOR(thisDev->devid));
+		dev_info(thisIpcore->dev, "'%s' device created\n", devname);
+		thisIpcore->dev_info = thisDev;
+		return 0;
+	}
+
+	/* Not found and table full */
+	thisIpcore->dev_info = NULL;
+	return -ENOMEM;
+}
+
+static void mathworks_ip_mmap_dma_open(struct vm_area_struct *vma)
+{
+	struct mathworks_ip_info *thisIpcore = vma->vm_private_data;
+
+	dev_info(thisIpcore->dev, "DMA VMA open, virt %lx, phys %lx\n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT);
+}
+
+static void mathworks_ip_mmap_dma_close(struct vm_area_struct *vma)
+{
+	struct mathworks_ip_info *thisIpcore = vma->vm_private_data;
+
+	dev_info(thisIpcore->dev, "DMA VMA close.\n");
+
+	/* Free the DMA memory */
+	dmam_free_coherent(thisIpcore->dev, thisIpcore->dma_info.size,
+			thisIpcore->dma_info.virt, thisIpcore->dma_info.phys);
+
+	/* Set the size to zero to indicate no memory is allocated */
+	thisIpcore->dma_info.size = 0;
+}
+
+static void mathworks_ip_mmap_open(struct vm_area_struct *vma)
+{
+	struct mathworks_ip_info *thisIpcore = vma->vm_private_data;
+
+	dev_info(thisIpcore->dev, "Simple VMA open, virt %lx, phys %lx\n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT);
+}
+
+static void mathworks_ip_mmap_close(struct vm_area_struct *vma)
+{
+	struct mathworks_ip_info *thisIpcore = vma->vm_private_data;
+
+	dev_info(thisIpcore->dev, "Simple VMA close.\n");
+}
+
+static vm_fault_t mathworks_ip_mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct mathworks_ip_info *thisIpcore = vma->vm_private_data;
+	struct page *thisPage;
+	unsigned long offset;
+
+	offset = (vmf->pgoff - vma->vm_pgoff) << PAGE_SHIFT;
+	thisPage = virt_to_page(thisIpcore->mem->start + offset);
+	get_page(thisPage);
+	vmf->page = thisPage;
+	return 0;
+}
+
+static struct vm_operations_struct mathworks_ip_mmap_ops = {
+	.open	= mathworks_ip_mmap_open,
+	.close	= mathworks_ip_mmap_close,
+	.fault	= mathworks_ip_mmap_fault,
+};
+
+static struct vm_operations_struct mathworks_ip_mmap_dma_ops = {
+	.open	= mathworks_ip_mmap_dma_open,
+	.close	= mathworks_ip_mmap_dma_close,
+};
+
+static int mathworks_ip_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+	struct mathworks_ip_info *thisIpcore = fp->private_data;
+	size_t size = vma->vm_end - vma->vm_start;
+	int status = 0;
+
+	vma->vm_private_data = thisIpcore;
+
+	dev_info(thisIpcore->dev, "[MMAP] size:%X pgoff: %lx\n", (unsigned int)size, vma->vm_pgoff);
+
+	switch (vma->vm_pgoff) {
+	case 0:
+		if (!thisIpcore->mem)
+			return -ENOMEM;
+		/* mmap the MMIO base address */
+		vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; /* may be redundant with the call to remap_pfn_range below */
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		if (remap_pfn_range(vma, vma->vm_start,
+				thisIpcore->mem->start >> PAGE_SHIFT,
+				size,
+				vma->vm_page_prot))
+			return -EAGAIN;
+		vma->vm_ops = &mathworks_ip_mmap_ops;
+		break;
+	default:
+		/* mmap DMA region */
+		status = mathworks_ip_dma_alloc(thisIpcore, size);
+		if (status != 0)
+			return status;
+
+		if (thisIpcore->dma_info.size == 0 || size != thisIpcore->dma_info.size)
+			return -EINVAL;
+		/*
+		 * We want to mmap the whole buffer, so test the requested
+		 * page offset before clearing it.
+		 */
+		if (vma->vm_pgoff == HDLV_BRAM_BASE_ADDR) {
+			vma->vm_pgoff = 0;
+			status = dma_mmap_coherent(thisIpcore->dev, vma,
+					thisIpcore->dma_info.virt, thisIpcore->mem->start, size);
+		} else {
+			vma->vm_pgoff = 0;
+			status = dma_mmap_coherent(thisIpcore->dev, vma,
+					thisIpcore->dma_info.virt, thisIpcore->dma_info.phys, size);
+		}
+		vma->vm_ops = &mathworks_ip_mmap_dma_ops;
+		break;
+	}
+
+	return status;
+}
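mathworks_ip_mmap() above keys everything off the mmap page offset: offset 0 maps the IP core's registers uncached with remap_pfn_range(), while any non-zero offset allocates and maps a coherent DMA buffer of the requested size (page offset 262144 additionally aliases the HDL Verifier BRAM base). A user-space sketch under those assumptions; the device node name is an example, and error checks are abbreviated:

/* Illustrative only: map the register window and a DMA buffer. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mwipcore0", O_RDWR | O_SYNC);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* offset 0 -> remap_pfn_range() of the MMIO region */
	volatile uint32_t *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, 0);

	/* any non-zero offset -> dmam_alloc_coherent() + dma_mmap_coherent() */
	void *dma_buf = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
			     MAP_SHARED, fd, 1UL << 12 /* pgoff = 1 */);

	if (regs != MAP_FAILED && dma_buf != MAP_FAILED) {
		/* ... use regs[] and dma_buf ... */
	}
	munmap((void *)regs, 4096);
	munmap(dma_buf, 65536);	/* DMA VMA close frees the buffer */
	close(fd);
	return 0;
}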
+
+static long mathworks_ip_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	int status;
+	struct mathworks_ip_info *thisIpcore = fp->private_data;
+
+	if (thisIpcore == NULL)
+		return -ENODEV;
+
+	switch (cmd) {
+	case MATHWORKS_IP_GET_PARAM:
+		if (thisIpcore->ops->get_param)
+			status = thisIpcore->ops->get_param(thisIpcore, (void *)arg);
+		else
+			status = -ENODEV;
+		break;
+	case MATHWORKS_IP_DMA_INFO:
+		status = mathworks_ip_dma_info(thisIpcore, (void *)arg);
+		break;
+	case MATHWORKS_IP_REG_INFO:
+		status = mathworks_ip_reg_info(thisIpcore, (void *)arg);
+		break;
+	default:
+		status = -EINVAL;
+	}
+	return status;
+}
+
+static void mathworks_ip_remove_cdev(void *opaque)
+{
+	struct mathworks_ip_info *thisIpcore = opaque;
+
+	sysfs_remove_file(&thisIpcore->dev->kobj, &dev_attr_fpga_irq_0.attr);
+
+	/* The cdev is always live by the time this devm action runs */
+	dev_info(thisIpcore->dev, "Destroy character dev\n");
+	device_destroy(mathworks_ip_class, thisIpcore->dev_id);
+	cdev_del(&thisIpcore->cdev);
+
+	if (thisIpcore->dev_info) {
+		thisIpcore->dev_info->devcnt--;
+		if (thisIpcore->dev_info->devcnt == 0) {
+			dev_info(thisIpcore->dev, "release device region\n");
+			unregister_chrdev_region(thisIpcore->dev_info->devid, MATHWORKS_IP_MAX_DEVTYPE);
+		}
+	}
+}
+
+struct file_operations mathworks_ip_common_fops = {
+	.owner		= THIS_MODULE,
+	.open		= mathworks_ip_open,
+	.fasync		= mathworks_ip_fasync_impl,
+	.release	= mathworks_ip_close,
+	.mmap		= mathworks_ip_mmap,
+	.unlocked_ioctl	= mathworks_ip_ioctl,
+};
+EXPORT_SYMBOL_GPL(mathworks_ip_common_fops);
+
+static int mathworks_ip_setup_cdev(struct mathworks_ip_info *thisIpcore)
+{
+	int status = 0;
+	struct mathworks_ip_dev_info *dev_entry;
+
+	if (mathworks_ip_class == NULL)
+		return -EPROBE_DEFER;
+
+	cdev_init(&thisIpcore->cdev, thisIpcore->ops->fops);
+	thisIpcore->cdev.owner = thisIpcore->module;
+
+	/* Find the device name */
+	status = mathworks_ip_get_devinfo(thisIpcore);
+	if (status)
+		return status;
+	dev_entry = thisIpcore->dev_info;
+
+	thisIpcore->dev_id = MKDEV(MAJOR(dev_entry->devid), dev_entry->devcnt);
+	status = cdev_add(&thisIpcore->cdev, thisIpcore->dev_id, 1);
+	if (status)
+		goto add_err;
+
+	thisIpcore->char_device = device_create(mathworks_ip_class, thisIpcore->dev, thisIpcore->dev_id, NULL, "%s%d", dev_entry->devname, dev_entry->devcnt++);
+	if (IS_ERR(thisIpcore->char_device)) {
+		status = PTR_ERR(thisIpcore->char_device);
+		dev_err(thisIpcore->dev, "Error: failed to create device node %s, err %d\n", thisIpcore->name, status);
+		goto create_err;
+	}
+
+	status = devm_add_action(thisIpcore->dev, mathworks_ip_remove_cdev, thisIpcore);
+	if (status) {
+		mathworks_ip_remove_cdev(thisIpcore);
+		return status;
+	}
+
+	return status;
+create_err:
+	dev_entry->devcnt--;
+	cdev_del(&thisIpcore->cdev);
+add_err:
+	if (dev_entry->devcnt == 0)
+		unregister_chrdev_region(dev_entry->devid, MATHWORKS_IP_MAX_DEVTYPE);
+	return status;
+}
+
+static void mathworks_ip_unregister(void *opaque)
+{
+	struct mathworks_ip_info *thisIpcore = opaque;
+
+	dev_set_drvdata(thisIpcore->dev, NULL);
+}
+
+struct mathworks_ip_info *devm_mathworks_ip_of_init(
+		struct platform_device *pdev,
+		struct module *module,
+		struct mathworks_ip_ops *ops,
+		bool mapRegs)
+{
+	struct mathworks_ip_info *ipDev;
+	int status;
+
+	/* Validate the arguments before they are dereferenced */
+	if (!pdev || !ops || !ops->fops || !ops->get_devname)
+		return ERR_PTR(-EINVAL);
+
+	ipDev = devm_kzalloc(&pdev->dev, sizeof(struct mathworks_ip_info), GFP_KERNEL);
+	if (!ipDev)
+		return ERR_PTR(-ENOMEM);
+
+	ipDev->module = module;
+	ipDev->ops = ops;
+	ipDev->dev = &pdev->dev;
+	ipDev->name = pdev->dev.of_node->name;
+	/* Check for the IRQ first, we may have to defer */
+	ipDev->irq = platform_get_irq_optional(pdev, 0);
+	if (ipDev->irq < 0) {
+		switch (ipDev->irq) {
+		case -EPROBE_DEFER:
+			dev_info(&pdev->dev, "Deferring probe for IRQ resources\n");
+			return ERR_PTR(-EPROBE_DEFER);
+		case -ENXIO:
+			ipDev->irq = 0;
+			break;
+		default:
+			return ERR_PTR(ipDev->irq);
+		}
+	}
+	/* Support only linear IRQ ranges */
+	if (ipDev->irq) {
+		/* capture the number of irqs */
+		ipDev->nirq = 1;
+		do {
+			status = platform_get_irq(pdev, ipDev->nirq);
+			if (status > 0) {
+				if (status == ipDev->irq + ipDev->nirq) {
+					ipDev->nirq++;
+				} else {
+					/* stop counting, otherwise this loop would never terminate */
+					dev_warn(&pdev->dev, "Non-sequential IRQs are not supported\n");
+					break;
+				}
+			}
+		} while (status > 0);
+	}
+	ipDev->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (ipDev->mem) {
+		dev_info(&pdev->dev, "Dev memory resource found at %px %08lX.\n",
+				(void *)((uintptr_t)ipDev->mem->start),
+				(unsigned long)resource_size(ipDev->mem));
+		ipDev->mem = devm_request_mem_region(&pdev->dev, ipDev->mem->start, resource_size(ipDev->mem), pdev->name);
+		if (!ipDev->mem) {
+			dev_err(&pdev->dev, "Error while request_mem_region call\n");
+			return ERR_PTR(-ENODEV);
+		}
+		if (mapRegs) {
+			ipDev->regs = devm_ioremap(&pdev->dev, ipDev->mem->start, resource_size(ipDev->mem));
+			if (!ipDev->regs) {
+				dev_err(&pdev->dev, "Failed to do ioremap\n");
+				return ERR_PTR(-ENODEV);
+			}
+		}
+	}
+
+	return ipDev;
+}
+EXPORT_SYMBOL_GPL(devm_mathworks_ip_of_init);
\n", + (void *)((uintptr_t)ipDev->mem->start), + (unsigned long)resource_size(ipDev->mem)); + ipDev->mem = devm_request_mem_region(&pdev->dev, ipDev->mem->start, resource_size(ipDev->mem), pdev->name); + + if (!ipDev->mem) + { + dev_err(&pdev->dev, "Error while request_mem_region call\n"); + return ERR_PTR(-ENODEV); + } + if(mapRegs){ + ipDev->regs = devm_ioremap(&pdev->dev, ipDev->mem->start, resource_size(ipDev->mem)); + if(!ipDev->regs) { + dev_err(&pdev->dev, "Failed to do ioremap\n"); + return ERR_PTR(-ENODEV); + } + } + } + + return ipDev; +} + +EXPORT_SYMBOL_GPL(devm_mathworks_ip_of_init); + +int devm_mathworks_ip_register(struct mathworks_ip_info *thisIpcore){ + int status; + char currentFileName[SYSFS_FILENAME_MAX_LENGTH]; + int i; + int irq_idx; + + status = mathworks_ip_setup_cdev(thisIpcore); + if(status) + { + dev_err(thisIpcore->dev, "mwipcore device addition failed: %d\n", status); + return status; + } + + /* It is possible that we have not required any interrupt */ + if (thisIpcore->irq) + { + for (irq_idx = 0; irq_idx < thisIpcore->nirq; irq_idx++) { + status = devm_request_irq(thisIpcore->dev, + thisIpcore->irq+irq_idx, + mathworks_ip_intr_handler, + 0, + thisIpcore->name, + thisIpcore); + if(status) + { + dev_err(thisIpcore->dev, "interrupt request addition failed.\n"); + return status; + } + } + + dev_info(thisIpcore->dev, "Enabled %d irqs from %d\n", thisIpcore->nirq, thisIpcore->irq); + } + + dev_set_drvdata(thisIpcore->dev, thisIpcore); + /* Add the release logic */ + status = devm_add_action(thisIpcore->dev, mathworks_ip_unregister, thisIpcore); + if(status){ + mathworks_ip_unregister(thisIpcore); + return status; + } + + status = sysfs_create_file(&thisIpcore->dev->kobj, &dev_attr_fpga_irq_0.attr); + if (status) { + printk(KERN_INFO "Error creating the sysfs device 0\n"); + return status; + } + + for(i=0; i < MAX_INTERRUPT_NODES_PER_DUT ; i++) { + snprintf(currentFileName,SYSFS_FILENAME_MAX_LENGTH ,"fpga_irq_%d",i); + thisIpcore->irq_kn[i] = sysfs_get_dirent(thisIpcore->dev->kobj.sd, currentFileName); + if(!thisIpcore->irq_kn[i]){ + printk(KERN_INFO "Error in file index %d\n", i); + } + } + return 0; +} + +EXPORT_SYMBOL_GPL(devm_mathworks_ip_register); + +static int __init mathworks_ip_init(void) +{ + mathworks_ip_class = class_create(THIS_MODULE, DRIVER_NAME); + if (IS_ERR(mathworks_ip_class)) + return PTR_ERR(mathworks_ip_class); + pr_info("Registered %s class\n", DRIVER_NAME); + return 0; +} + +static void __exit mathworks_ip_exit(void) +{ + + class_destroy(mathworks_ip_class); + mathworks_ip_class = NULL; +} + +module_init(mathworks_ip_init); +module_exit(mathworks_ip_exit); + +MODULE_AUTHOR("MathWorks, Inc"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MathWorks IP driver framework"); +MODULE_ALIAS(DRIVER_NAME); diff --git a/drivers/misc/mathworks/mathworks_ipcore.c b/drivers/misc/mathworks/mathworks_ipcore.c new file mode 100755 index 0000000000000..32dd896bf43cd --- /dev/null +++ b/drivers/misc/mathworks/mathworks_ipcore.c @@ -0,0 +1,178 @@ +/* + * MathWorks AXI DMA Driver + * + * Copyright 2014-2016 The MathWorks, Inc + * + * Licensed under the GPL-2. 
diff --git a/drivers/misc/mathworks/mathworks_ipcore.c b/drivers/misc/mathworks/mathworks_ipcore.c
new file mode 100755
index 0000000000000..32dd896bf43cd
--- /dev/null
+++ b/drivers/misc/mathworks/mathworks_ipcore.c
@@ -0,0 +1,178 @@
+/*
+ * MathWorks AXI DMA Driver
+ *
+ * Copyright 2014-2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include "mathworks_ipcore.h"
+#include "mw_stream_channel.h"
+#include "mw_stream_iio_channel.h"
+#include "mw_mm_iio_channel.h"
+#include "mw_sharedmem_iio_channel.h"
+
+#include <linux/module.h>
+
+enum mw_dev_mode {
+	MWDEV_MODE_LEGACY,
+	MWDEV_MODE_SUBDEV,
+	MWDEV_MODE_NONE,
+};
+
+struct mw_dev_info {
+	enum mw_dev_mode dev_mode;
+};
+
+static struct mw_dev_info subdev_dev_info = {
+	.dev_mode = MWDEV_MODE_SUBDEV,
+};
+
+static struct mw_dev_info dma_legacy_dev_info = {
+	.dev_mode = MWDEV_MODE_LEGACY,
+};
+
+static struct mw_dev_info nodev_legacy_dev_info = {
+	.dev_mode = MWDEV_MODE_NONE,
+};
+
+/*
+ * @brief mathworks_ipcore_of_match
+ */
+static const struct of_device_id mathworks_ipcore_of_match[] = {
+	{ .compatible = "mathworks,mwipcore-v3.00", .data = &subdev_dev_info },
+	{ .compatible = "mathworks,mwipcore-v2.00", .data = &dma_legacy_dev_info },
+	{ .compatible = "mathworks,mwipcore-axi4lite-v1.00", .data = &nodev_legacy_dev_info },
+	{},
+};
+
+static void mathworks_ipcore_get_devname(struct mathworks_ip_info *mw_ip_info, char *devname)
+{
+	snprintf(devname, MATHWORKS_IP_DEVNAME_LEN, "%s", mw_ip_info->name);
+}
+
+static struct mathworks_ip_ops mathworks_ipcore_ops = {
+	.get_devname = mathworks_ipcore_get_devname,
+	.get_param = NULL,
+	.fops = &mathworks_ip_common_fops,
+};
+
+/*
+ * @brief mathworks_ipcore_of_probe
+ */
+static int mathworks_ipcore_of_probe(struct platform_device *op)
+{
+	int status = 0;
+	struct device *dev = &op->dev;
+	struct mathworks_ipcore_dev *mwdev;
+	const struct of_device_id *id;
+	struct mathworks_ip_ops *ops;
+	const struct mw_dev_info *info;
+
+	mwdev = devm_kzalloc(dev, sizeof(struct mathworks_ipcore_dev), GFP_KERNEL);
+	if (!mwdev) {
+		dev_err(dev, "Failed to allocate memory for device context\n");
+		return -ENOMEM;
+	}
+
+	id = of_match_node(mathworks_ipcore_of_match, op->dev.of_node);
+	if (!id || !id->data)
+		return -ENODEV;
+	info = id->data;
+
+	switch (info->dev_mode) {
+	case MWDEV_MODE_LEGACY:
+		ops = mw_stream_channel_get_ops();
+		break;
+	case MWDEV_MODE_SUBDEV:
+	case MWDEV_MODE_NONE:
+	default:
+		ops = &mathworks_ipcore_ops;
+		break;
+	}
+
+	mwdev->mw_ip_info = devm_mathworks_ip_of_init(op, THIS_MODULE, ops, true);
+	if (IS_ERR(mwdev->mw_ip_info))
+		return PTR_ERR(mwdev->mw_ip_info);
+
+	if (!mwdev->mw_ip_info->mem) {
+		dev_err(dev, "Failed to get resource for platform device\n");
+		return -ENOENT;
+	}
+
+	status = of_property_read_u32(dev->of_node, "mathworks,rst-reg", &mwdev->rst_reg);
+	if (status) {
+		/* Fall back to 0 if the property does not exist */
+		mwdev->rst_reg = 0;
+	}
+
+	status = of_property_read_u32(dev->of_node, "mathworks,timestamp-reg", &mwdev->timestamp_reg);
+	if (status) {
+		/* Fall back to 0xFFFFFFFF if the property does not exist */
+		mwdev->timestamp_reg = 0xFFFFFFFF;
+	}
+
+	mwdev->mw_ip_info->private = mwdev;
+
+	status = devm_mathworks_ip_register(mwdev->mw_ip_info);
+	if (status) {
+		dev_err(dev, "MathWorks IP device registration failed: %d\n", status);
+		return status;
+	}
+
+	switch (info->dev_mode) {
+	case MWDEV_MODE_LEGACY:
+		status = mw_stream_channels_probe(mwdev);
+		if (status)
+			return status;
+		break;
+	case MWDEV_MODE_SUBDEV:
+		status = mw_stream_iio_channels_probe(mwdev);
+		if (status)
+			return status;
+		status = mw_mm_iio_channels_probe(mwdev);
+		if (status)
+			return status;
+		status = mw_sharedmem_iio_channels_probe(mwdev);
+		if (status)
+			return status;
+		break;
+	case MWDEV_MODE_NONE:
+	default:
+		break;
+	}
+
+	return status;
+}
+
+/*
+ * @brief mathworks_ipcore_of_remove
+ */
+static int mathworks_ipcore_of_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver mathworks_ipcore_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mathworks_ipcore_of_match,
+	},
+	.probe = mathworks_ipcore_of_probe,
+	.remove = mathworks_ipcore_of_remove,
+};
+
+module_platform_driver(mathworks_ipcore_driver);
+
+MODULE_DEVICE_TABLE(of, mathworks_ipcore_of_match);
+MODULE_AUTHOR("MathWorks, Inc");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_NAME ": MathWorks IP Core Driver");
diff --git a/drivers/misc/mathworks/mathworks_ipcore.h b/drivers/misc/mathworks/mathworks_ipcore.h
new file mode 100755
index 0000000000000..61c24f2596daf
--- /dev/null
+++ b/drivers/misc/mathworks/mathworks_ipcore.h
@@ -0,0 +1,68 @@
+/*
+ * MathWorks AXI DMA Driver
+ *
+ * Copyright 2014-2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef _MATHWORKS_IPCORE_H_
+#define _MATHWORKS_IPCORE_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+/* Open firmware includes */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <linux/mathworks/mathworks_ip.h>
+
+#define DRIVER_NAME "mwipcore"
+#define MAX_DEVICES 4
+#define MAX_CHANNELS 8
+
+#ifdef _DEBUG
+#define MW_DBG_text(txt)        printk(KERN_INFO DRIVER_NAME txt)
+#define MW_DBG_printf(txt, ...) printk(KERN_INFO DRIVER_NAME txt, __VA_ARGS__)
+#else
+#define MW_DBG_printf(txt, ...)
+#define MW_DBG_text(txt)
+#endif
+
+struct mathworks_ipcore_dev {
+	struct mathworks_ip_info *mw_ip_info;
+	u32 rst_reg;
+	u32 timestamp_reg;
+	void *private;
+};
+
+/*********************************************************
+ * API functions
+ *********************************************************/
+
+static inline void mw_ip_reset(struct mathworks_ipcore_dev *mwdev)
+{
+	mw_ip_write32(mwdev->mw_ip_info, mwdev->rst_reg, 0x1);
+}
+
+#endif /* _MATHWORKS_IPCORE_H_ */
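The header's API surface is small: mw_ip_reset() pulses the reset register named by the mathworks,rst-reg property, and timestamp_reg holds either a register offset or the 0xFFFFFFFF "not present" sentinel set up in the probe above. A short sketch of how a sub-device might use it; mw_ip_read32()/mw_ip_write32() are assumed to be provided by <linux/mathworks/mathworks_ip.h>, as they are used elsewhere in this series (illustrative, not part of the patch):

/* Illustrative only: reset the IP core and sample its timestamp. */
static u32 example_reset_and_timestamp(struct mathworks_ipcore_dev *mwdev)
{
	mw_ip_reset(mwdev);	/* writes 0x1 to mwdev->rst_reg */

	if (mwdev->timestamp_reg == 0xFFFFFFFF)
		return 0;	/* DT did not provide mathworks,timestamp-reg */
	return mw_ip_read32(mwdev->mw_ip_info, mwdev->timestamp_reg);
}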
diff --git a/drivers/misc/mathworks/mw_mm_iio_channel.c b/drivers/misc/mathworks/mw_mm_iio_channel.c
new file mode 100755
index 0000000000000..8df91c451f653
--- /dev/null
+++ b/drivers/misc/mathworks/mw_mm_iio_channel.c
@@ -0,0 +1,334 @@
+/*
+ * MathWorks Memory Mapped Channel
+ *
+ * Copyright 2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include "mw_mm_iio_channel.h"
+#include "mathworks_ipcore.h"
+
+static DEFINE_IDA(mw_mm_iio_channel_ida);
+
+#define MWDEV_TO_MWIP(mwdev)	(mwdev->mw_ip_info)
+#define IP2DEVP(mwdev)		(MWDEV_TO_MWIP(mwdev)->dev)
+
+#define MW_MM_IIO_ENUM IIO_ENUM
+#define MW_MM_IIO_ENUM_AVAILABLE(_name, _shared_by, _e) \
+{ \
+	.name = (_name "_available"), \
+	.shared = (_shared_by), \
+	.read = iio_enum_available_read, \
+	.private = (uintptr_t)(_e), \
+}
+
+struct mw_mm_iio_channel_info {
+	enum iio_device_direction iio_direction;
+};
+
+enum mw_mm_iio_reg_access {
+	MW_MM_IO_MODE_DISABLED = 0,
+	MW_MM_IO_MODE_ENABLED,
+};
+
+struct mw_mm_iio_chandev {
+	struct mathworks_ipcore_dev *mwdev;
+	struct device dev;
+	enum iio_device_direction iio_direction;
+	enum mw_mm_iio_reg_access reg_access;
+};
+
+static void mw_mm_iio_chan_ida_remove(void *opaque)
+{
+	struct mw_mm_iio_chandev *mwchan = opaque;
+
+	ida_simple_remove(&mw_mm_iio_channel_ida, mwchan->dev.id);
+}
+
+/*************
+ * IO Modes
+ *************/
+static const char * const mw_mm_iio_reg_access_modes[] = { "disabled", "enabled" };
+
+static int mw_mm_iio_channel_get_reg_access(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan)
+{
+	struct mw_mm_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	return mwchan->reg_access;
+}
+
+static int mw_mm_iio_channel_set_reg_access(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan, unsigned int reg_access)
+{
+	struct mw_mm_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	mwchan->reg_access = reg_access;
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+static const struct iio_enum mw_mm_iio_reg_access_enum = {
+	.items = mw_mm_iio_reg_access_modes,
+	.num_items = ARRAY_SIZE(mw_mm_iio_reg_access_modes),
+	.get = mw_mm_iio_channel_get_reg_access,
+	.set = mw_mm_iio_channel_set_reg_access,
+};
+
+static const struct iio_chan_spec_ext_info mw_mm_iio_ch_reg_access[] = {
+	MW_MM_IIO_ENUM("reg_access", IIO_SHARED_BY_ALL, &mw_mm_iio_reg_access_enum),
+	MW_MM_IIO_ENUM_AVAILABLE("reg_access", IIO_SHARED_BY_ALL, &mw_mm_iio_reg_access_enum),
+	{ },
+};
+
+static int mw_mm_iio_channel_reg_access(struct iio_dev *indio_dev,
+		unsigned reg, unsigned writeval,
+		unsigned *readval)
+{
+	struct mw_mm_iio_chandev *mwchan = iio_priv(indio_dev);
+	int ret = 0;
+
+	mutex_lock(&indio_dev->mlock);
+	if (mwchan->reg_access == MW_MM_IO_MODE_DISABLED) {
+		ret = -EACCES;
+	} else {
+		if (readval == NULL)
+			mw_ip_write32(mwchan->mwdev->mw_ip_info, reg & 0xFFFF, writeval);
+		else
+			*readval = mw_ip_read32(mwchan->mwdev->mw_ip_info, reg & 0xFFFF);
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static const struct iio_info mw_mm_iio_dev_info = {
+	.debugfs_reg_access = &mw_mm_iio_channel_reg_access,
+};
+
+static int mw_mm_setup_info_channel(struct iio_dev *indio_dev,
+		int index, const char *name,
+		const struct iio_chan_spec_ext_info *info)
+{
+	struct mw_mm_iio_chandev *mwchan = iio_priv(indio_dev);
+	struct iio_chan_spec *channel = (struct iio_chan_spec *)
+			&indio_dev->channels[index];
+
+	channel->type = IIO_GENERIC_DATA;
+	channel->indexed = 1;
+	channel->extend_name = devm_kstrdup(&mwchan->dev, name, GFP_KERNEL);
+	if (!channel->extend_name)
+		return -ENOMEM;
+	channel->ext_info = info;
+	channel->scan_index = -ENODEV;
+
+	return 0;
+}
+
+static void mw_mm_iio_unregister(void *opaque)
+{
+	struct device *dev = opaque;
+
+	/* Unregister the IIO device */
+	devres_release_group(dev, mw_mm_iio_unregister);
+}
+
+static int devm_mw_mm_iio_register(struct iio_dev *indio_dev)
+{
+	struct mw_mm_iio_chandev *mwchan = iio_priv(indio_dev);
+	int status;
+	int chIdx = 0;
+
+	if (!devres_open_group(&mwchan->dev, mw_mm_iio_unregister, GFP_KERNEL))
+		return -ENOMEM;
+
+	indio_dev->dev.parent = &mwchan->dev;
+	indio_dev->name = dev_name(&mwchan->dev);
+	indio_dev->info = &mw_mm_iio_dev_info;
+
+	indio_dev->num_channels = 1; /* reg access channel */
+	indio_dev->channels = devm_kzalloc(&mwchan->dev, (indio_dev->num_channels) * sizeof(struct iio_chan_spec), GFP_KERNEL);
+	if (!indio_dev->channels)
+		return -ENOMEM;
+
+	status = mw_mm_setup_info_channel(indio_dev, chIdx++,
+			"reg_access", mw_mm_iio_ch_reg_access);
+	if (status)
+		return status;
+
+	status = devm_iio_device_register(&mwchan->dev, indio_dev);
+	if (status)
+		return status;
+
+	devres_close_group(&mwchan->dev, mw_mm_iio_unregister);
+
+	/* Setup the parent device to tear us down on removal */
+	status = devm_add_action(mwchan->dev.parent, mw_mm_iio_unregister, &mwchan->dev);
+	if (status) {
+		mw_mm_iio_unregister(&mwchan->dev);
+		return status;
+	}
+
+	return 0;
+}
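devm_mw_mm_iio_register() above leans on devres groups: everything devm-allocated between open and close of the group can later be torn down as one unit, either from the parent's devm list or explicitly on error. The idiom in isolation, using only standard devres APIs (illustrative, not part of the patch):

/* Illustrative only: the devres-group idiom used above. All devm
 * allocations made between devres_open_group() and devres_close_group()
 * can be released together via devres_release_group() with the same id. */
#include <linux/device.h>
#include <linux/slab.h>

static void example_teardown(void *opaque)
{
	struct device *dev = opaque;

	devres_release_group(dev, example_teardown);	/* frees the group */
}

static int example_register(struct device *dev)
{
	if (!devres_open_group(dev, example_teardown, GFP_KERNEL))
		return -ENOMEM;

	/* ... devm_kzalloc(), devm_iio_device_register(), etc. ... */

	devres_close_group(dev, example_teardown);
	/* arrange for the group to be released when dev goes away */
	return devm_add_action_or_reset(dev, example_teardown, dev);
}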
+
+/* Nothing to actually do upon release */
+static void mw_mm_iio_channel_release(struct device *dev)
+{
+}
+
+static struct iio_dev *devm_mw_mm_iio_alloc(
+		struct mathworks_ipcore_dev *mwdev,
+		struct device_node *node,
+		struct mw_mm_iio_channel_info *info)
+{
+	struct iio_dev *indio_dev;
+	struct mw_mm_iio_chandev *mwchan;
+	const char *devname;
+	int status;
+
+	if (!devres_open_group(IP2DEVP(mwdev), devm_mw_mm_iio_alloc, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+
+	indio_dev = devm_iio_device_alloc(IP2DEVP(mwdev), sizeof(struct mw_mm_iio_chandev));
+	if (!indio_dev) {
+		dev_err(IP2DEVP(mwdev), "Failed to allocate memory for channel %s\n", node->name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mwchan = iio_priv(indio_dev);
+	mwchan->mwdev = mwdev;
+	mwchan->iio_direction = info->iio_direction;
+
+	device_initialize(&mwchan->dev);
+
+	mwchan->dev.parent = IP2DEVP(mwdev);
+	mwchan->dev.of_node = node;
+	mwchan->dev.id = ida_simple_get(&mw_mm_iio_channel_ida, 0, 0, GFP_KERNEL);
+	if (mwchan->dev.id < 0)
+		return ERR_PTR(mwchan->dev.id);
+	status = devm_add_action(IP2DEVP(mwdev), mw_mm_iio_chan_ida_remove, mwchan);
+	if (status) {
+		mw_mm_iio_chan_ida_remove(mwchan);
+		return ERR_PTR(status);
+	}
+	mwchan->dev.release = mw_mm_iio_channel_release;
+	/* clone the parent's DMA config */
+	memcpy(&mwchan->dev.archdata, &IP2DEVP(mwdev)->archdata, sizeof(struct dev_archdata));
+	mwchan->dev.coherent_dma_mask = IP2DEVP(mwdev)->coherent_dma_mask;
+	mwchan->dev.dma_mask = IP2DEVP(mwdev)->dma_mask;
+	mwchan->dev.dma_range_map = IP2DEVP(mwdev)->dma_range_map;
+
+	status = of_property_read_string(node, "mathworks,dev-name", &devname);
+	if (!status) {
+		/* Use the specified channel name */
+		status = dev_set_name(&mwchan->dev, "%s:%s", dev_name(mwchan->mwdev->mw_ip_info->char_device), devname);
+	} else {
+		/* Use the node name + dev ID */
+		status = dev_set_name(&mwchan->dev, "%s:%s%d", dev_name(mwchan->mwdev->mw_ip_info->char_device), node->name, mwchan->dev.id);
+	}
+	if (status)
+		return ERR_PTR(status);
+
+	status = device_add(&mwchan->dev);
+	if (status)
+		return ERR_PTR(status);
+
+	status = devm_add_action(IP2DEVP(mwdev), (devm_action_fn)device_unregister, &mwchan->dev);
+	if (status) {
+		device_unregister(&mwchan->dev);
+		return ERR_PTR(status);
+	}
+
+	devres_close_group(IP2DEVP(mwdev), devm_mw_mm_iio_alloc);
+
+	return indio_dev;
+}
+
+static int mw_mm_iio_channel_probe(
+		struct mathworks_ipcore_dev *mwdev,
+		struct device_node *node,
+		struct mw_mm_iio_channel_info *info)
+{
+	int status;
+	struct iio_dev *indio_dev;
+
+	indio_dev = devm_mw_mm_iio_alloc(mwdev, node, info);
+	if (IS_ERR(indio_dev))
+		return PTR_ERR(indio_dev);
+
+	status = devm_mw_mm_iio_register(indio_dev);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static struct mw_mm_iio_channel_info mw_mm_iio_mm2s_info = {
+	.iio_direction = IIO_DEVICE_DIRECTION_OUT,
+};
+
+static struct mw_mm_iio_channel_info mw_mm_iio_s2mm_info = {
+	.iio_direction = IIO_DEVICE_DIRECTION_IN,
+};
+
+static const struct of_device_id mw_mm_iio_channel_of_match[] = {
+	{ .compatible = "mathworks,mm-write-channel-v1.00", .data = &mw_mm_iio_mm2s_info },
+	{ .compatible = "mathworks,mm-read-channel-v1.00", .data = &mw_mm_iio_s2mm_info },
+	{},
+};
+
+int mw_mm_iio_channels_probe(struct mathworks_ipcore_dev *mwdev)
+{
+	int status;
+	struct device_node *child;
+	const struct of_device_id *match;
+
+	for_each_child_of_node(IP2DEVP(mwdev)->of_node, child) {
+		match = of_match_node(mw_mm_iio_channel_of_match, child);
+		if (match) {
+			status = mw_mm_iio_channel_probe(mwdev, child, (struct mw_mm_iio_channel_info *)match->data);
+			if (status)
+				return status;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mw_mm_iio_channels_probe);
+
+static int __init mw_mm_iio_channel_init(void)
+{
+	return 0;
+}
+
+static void __exit mw_mm_iio_channel_exit(void)
+{
+}
+
+module_init(mw_mm_iio_channel_init);
+module_exit(mw_mm_iio_channel_exit);
+
+MODULE_AUTHOR("MathWorks, Inc");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MathWorks Memory Mapped IIO Channel");
+MODULE_ALIAS(DRIVER_NAME);
diff --git a/drivers/misc/mathworks/mw_mm_iio_channel.h b/drivers/misc/mathworks/mw_mm_iio_channel.h
new file mode 100755
index 0000000000000..eff8254c65d86
--- /dev/null
+++ b/drivers/misc/mathworks/mw_mm_iio_channel.h
@@ -0,0 +1,25 @@
+/*
+ * MathWorks Memory Mapped Channel
+ *
+ * Copyright 2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef _MW_MM_IIO_CHANNEL_H_
+#define _MW_MM_IIO_CHANNEL_H_
+
+#include "mathworks_ipcore.h"
+
+/*********************************************************
+ * API functions
+ *********************************************************/
+#if defined(CONFIG_MWIPCORE_IIO_MM) || defined(CONFIG_MWIPCORE_IIO_MM_MODULE)
+extern int mw_mm_iio_channels_probe(struct mathworks_ipcore_dev *mwdev);
+#else
+static inline int mw_mm_iio_channels_probe(struct mathworks_ipcore_dev *mwdev)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif /* _MW_MM_IIO_CHANNEL_H_ */
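The reg_access gate above decides whether .debugfs_reg_access is honored. The IIO core exposes that hook through the direct_reg_access file in debugfs, so register peeks look like the following from user space; the device index and the exact sysfs attribute name are assumptions for the example (illustrative, not part of the patch):

/* Illustrative only: enable register access via the "reg_access" enum,
 * then read a register through IIO's debugfs direct_reg_access file. */
#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned int val;

	f = fopen("/sys/bus/iio/devices/iio:device0/reg_access", "w"); /* example path */
	if (!f)
		return 1;
	fputs("enabled", f);	/* default "disabled" makes accesses fail with -EACCES */
	fclose(f);

	f = fopen("/sys/kernel/debug/iio/iio:device0/direct_reg_access", "r+");
	if (!f)
		return 1;
	fputs("0x100", f);	/* select a register offset (driver masks to 16 bits) */
	fflush(f);
	rewind(f);
	if (fscanf(f, "%x", &val) == 1)
		printf("reg 0x100 = 0x%x\n", val);
	fclose(f);
	return 0;
}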
diff --git a/drivers/misc/mathworks/mw_sharedmem_iio_channel.c b/drivers/misc/mathworks/mw_sharedmem_iio_channel.c
new file mode 100755
index 0000000000000..ec255bee8525f
--- /dev/null
+++ b/drivers/misc/mathworks/mw_sharedmem_iio_channel.c
@@ -0,0 +1,1090 @@
+/*
+ * MathWorks Shared Memory Channel
+ *
+ * Copyright 2019 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
+#include <linux/uaccess.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
+
+#include "mw_sharedmem_iio_channel.h"
+#include "mathworks_ipcore.h"
+
+static DEFINE_IDA(mw_sharedmem_iio_channel_ida);
+static DEFINE_IDA(mw_sharedmem_region_ida);
+
+#define MWDEV_TO_MWIP(mwdev)	(mwdev->mw_ip_info)
+#define IP2DEVP(mwdev)		(MWDEV_TO_MWIP(mwdev)->dev)
+
+#define MW_SHAREDMEM_IIO_ENUM IIO_ENUM
+#define MW_SHAREDMEM_IIO_ENUM_AVAILABLE(_name, _shared_by, _e) \
+{ \
+	.name = (_name "_available"), \
+	.shared = (_shared_by), \
+	.read = iio_enum_available_read, \
+	.private = (uintptr_t)(_e), \
+}
+
+#define MW_IRQ_ACK_SET		(1)
+#define MW_IRQ_ACK_CLEAR	(2)
+#define MW_IRQ_ACK_SET_CLEAR	(4)
+
+enum mw_sharedmem_iio_chan_type {
+	MW_SHAREDMEM_CHAN_TYPE_READ = 0,
+	MW_SHAREDMEM_CHAN_TYPE_WRITE,
+};
+
+struct mw_sharedmem_iio_channel_info {
+	enum iio_device_direction iio_direction;
+};
+
+struct mw_sharedmem_region {
+	void *virt;
+	phys_addr_t phys;
+	size_t size;
+	int read_count;			/* count of active readers */
+	struct mutex read_count_lock;	/* protects read_count, only used by readers */
+	struct mutex lock;		/* ensures mutual exclusion of writers */
+};
+
+struct mw_sharedmem_region_dev {
+	struct mathworks_ipcore_dev *mwdev;
+	struct device dev;
+	const char *name;
+	int rd_base_reg;	/* IP core AXI Master Read Base Address */
+	int wr_base_reg;	/* IP core AXI Master Write Base Address */
+	struct mw_sharedmem_region region;
+};
+
+/* Setting to determine synchronization with the IP core */
+enum mw_sharedmem_iio_ip_sync_mode {
+	MW_SHAREDMEM_IP_SYNC_MODE_NONE = 0,
+	MW_SHAREDMEM_IP_SYNC_MODE_INTERRUPT,
+};
+
+/* Setting to determine whether to automatically set the
+   IP core AXI Master Read/Write Base Address register */
+enum mw_sharedmem_iio_base_addr_mode {
+	MW_SHAREDMEM_BASE_ADDR_MODE_AUTO = 0,
+	MW_SHAREDMEM_BASE_ADDR_MODE_MANUAL,
+};
+
+struct mw_sharedmem_iio_chandev {
+	struct mathworks_ipcore_dev *mwdev;
+	struct mw_sharedmem_region_dev *mwregion;
+	struct device dev;
+	struct mw_sharedmem_region *region;
+	enum mw_sharedmem_iio_chan_type type;
+	size_t offset;
+	struct mutex lock;
+	int irq;
+	int irq_count;
+	int irq_ack_reg;
+	int irq_ack_mask;
+	int irq_ack_op;
+	enum mw_sharedmem_iio_base_addr_mode base_addr_mode;
+	enum mw_sharedmem_iio_ip_sync_mode ip_sync_mode;
+};
+
+struct mw_sharedmem_buffer {
+	struct iio_buffer buffer;
+	struct mw_sharedmem_iio_chandev *mwchan;
+	bool enabled;
+	struct mutex lock;
+};
+
+/***************************
+ * Buffer
+ ***************************/
+static struct mw_sharedmem_buffer *buffer_to_mw_sharedmem_buffer(struct iio_buffer *buffer)
+{
+	return container_of(buffer, struct mw_sharedmem_buffer, buffer);
+}
+
+static int mw_sharedmem_buffer_write(struct iio_buffer *buffer, size_t n,
+		const char __user *user_buffer)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+	struct mw_sharedmem_iio_chandev *mwchan = sharedmem_buff->mwchan;
+	struct mw_sharedmem_region *region = mwchan->region;
+	size_t offset;
+	int ret;
+
+	if (n < buffer->bytes_per_datum)
+		return -EINVAL;
+
+	mutex_lock(&mwchan->lock);
+	offset = mwchan->offset;
+	mutex_unlock(&mwchan->lock);
+
+	/* Round down, as in the read path; rounding up could copy past the user buffer */
+	n = rounddown(n, buffer->bytes_per_datum);
+
+	/* Only handle exact buffer size at end of region */
+	if (n > region->size - offset)
+		return -EFAULT;
+
+	mutex_lock(&region->lock);
+	if (copy_from_user(region->virt + offset, user_buffer, n)) {
+		ret = -EFAULT;
+		goto out_unlock;
+	}
+
+	ret = n;
+
+	if (mwchan->irq > 0) {
+		mutex_lock(&mwchan->lock);
+		if (mwchan->irq_count > 0)
+			mwchan->irq_count--;
+		mutex_unlock(&mwchan->lock);
+	}
+
+out_unlock:
+	mutex_unlock(&region->lock);
+
+	return ret;
+}
+
+static int mw_sharedmem_buffer_read(struct iio_buffer *buffer, size_t n,
+		char __user *user_buffer)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+	struct mw_sharedmem_iio_chandev *mwchan = sharedmem_buff->mwchan;
+	struct mw_sharedmem_region *region = mwchan->region;
+	size_t offset;
+	int ret;
+
+	if (n < buffer->bytes_per_datum)
+		return -EINVAL;
+
+	mutex_lock(&mwchan->lock);
+	offset = mwchan->offset;
+	mutex_unlock(&mwchan->lock);
+
+	n = rounddown(n, buffer->bytes_per_datum);
+
+	/* Only handle exact buffer size at end of region */
+	if (n > region->size - offset)
+		return -EFAULT;
+
+	/* Read-prioritized locking */
+	mutex_lock(&region->read_count_lock);
+	region->read_count++;
+	if (region->read_count == 1)
+		mutex_lock(&region->lock);
+	mutex_unlock(&region->read_count_lock);
+
+	if (copy_to_user(user_buffer, region->virt + offset, n)) {
+		ret = -EFAULT;
+		goto out_unlock;
+	}
+
+	ret = n;
+
+	if (mwchan->irq > 0) {
+		mutex_lock(&mwchan->lock);
+		if (mwchan->irq_count > 0)
+			mwchan->irq_count--;
+		mutex_unlock(&mwchan->lock);
+	}
+
+out_unlock:
+	mutex_lock(&region->read_count_lock);
+	region->read_count--;
+	if (region->read_count == 0)
+		mutex_unlock(&region->lock);
+	mutex_unlock(&region->read_count_lock);
+
+	return ret;
+}
+
+static int mw_sharedmem_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
+{
+	buffer->bytes_per_datum = bpd;
+	return 0;
+}
+
+static int mw_sharedmem_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+	struct mw_sharedmem_region *region = sharedmem_buff->mwchan->region;
+
+	if (length < 1)
+		length = 1;
+	if (length > region->size)
+		length = region->size;
+	buffer->length = length;
+	return 0;
+}
+
+static int mw_sharedmem_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+
+	mutex_lock(&sharedmem_buff->lock);
+	sharedmem_buff->enabled = true;
+	mutex_unlock(&sharedmem_buff->lock);
+	return 0;
+}
+
+static int mw_sharedmem_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+
+	mutex_lock(&sharedmem_buff->lock);
+	sharedmem_buff->enabled = false;
+	mutex_unlock(&sharedmem_buff->lock);
+	return 0;
+}
+
+static size_t mw_sharedmem_buffer_data_available(struct iio_buffer *buffer)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+	struct mw_sharedmem_iio_chandev *mwchan = sharedmem_buff->mwchan;
+	struct mw_sharedmem_region *region = mwchan->region;
+	size_t size;
+
+	mutex_lock(&mwchan->lock);
+
+	if ((mwchan->ip_sync_mode == MW_SHAREDMEM_IP_SYNC_MODE_INTERRUPT) && (mwchan->irq > 0))
+		size = mwchan->irq_count ? region->size : 0;
+	else
+		size = region->size;
+
+	mutex_unlock(&mwchan->lock);
+
+	return size;
+}
+
+static bool mw_sharedmem_buffer_space_available(struct iio_buffer *buffer)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+	struct mw_sharedmem_iio_chandev *mwchan = sharedmem_buff->mwchan;
+	bool space_available;
+
+	mutex_lock(&mwchan->lock);
+
+	if ((mwchan->ip_sync_mode == MW_SHAREDMEM_IP_SYNC_MODE_INTERRUPT) && (mwchan->irq > 0))
+		space_available = (mwchan->irq_count > 0);
+	else
+		space_available = true;
+
+	mutex_unlock(&mwchan->lock);
+
+	return space_available;
+}
+
+static void mw_sharedmem_buffer_release(struct iio_buffer *buffer)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff = buffer_to_mw_sharedmem_buffer(buffer);
+
+	mutex_destroy(&sharedmem_buff->lock);
+}
+
+static const struct iio_buffer_access_funcs mw_sharedmem_access_func = {
+	.read = mw_sharedmem_buffer_read,
+	.write = mw_sharedmem_buffer_write,
+	.set_bytes_per_datum = mw_sharedmem_buffer_set_bytes_per_datum,
+	.set_length = mw_sharedmem_buffer_set_length,
+	.enable = mw_sharedmem_buffer_enable,
+	.disable = mw_sharedmem_buffer_disable,
+	.data_available = mw_sharedmem_buffer_data_available,
+	.space_available = mw_sharedmem_buffer_space_available,
+	.release = mw_sharedmem_buffer_release,
+
+	.modes = INDIO_BUFFER_HARDWARE,
+};
+
+static int mw_sharedmem_buffer_init(struct mw_sharedmem_buffer *sharedmem_buff,
+		struct mw_sharedmem_iio_chandev *mwchan)
+{
+	iio_buffer_init(&sharedmem_buff->buffer);
+	sharedmem_buff->buffer.access = &mw_sharedmem_access_func;
+	sharedmem_buff->mwchan = mwchan;
+	sharedmem_buff->enabled = false;
+	mutex_init(&sharedmem_buff->lock);
+	return 0;
+}
+
+static struct iio_buffer *mw_sharedmem_buffer_alloc(struct device *dev,
+		struct mw_sharedmem_iio_chandev *mwchan)
+{
+	struct mw_sharedmem_buffer *sharedmem_buff;
+
+	sharedmem_buff = devm_kzalloc(dev, sizeof(*sharedmem_buff), GFP_KERNEL);
+	if (!sharedmem_buff)
+		return ERR_PTR(-ENOMEM);
+
+	mw_sharedmem_buffer_init(sharedmem_buff, mwchan);
+
+	return &sharedmem_buff->buffer;
+}
+
+static void mw_sharedmem_buffer_free(struct iio_buffer *buffer)
+{
+	iio_buffer_put(buffer);
+}
+
+/*************
+ * IRQ
+ *************/
+
+static void mw_sharedmem_irq_ack(struct mw_sharedmem_iio_chandev *mwchan)
+{
+	uint32_t val;
+
+	if (mwchan->irq_ack_reg < 0)
+		return;
+
+	val = mw_ip_read32(mwchan->mwdev->mw_ip_info, mwchan->irq_ack_reg);
+
+	/* First write: SET or CLEAR */
+	if (mwchan->irq_ack_op & (MW_IRQ_ACK_SET | MW_IRQ_ACK_SET_CLEAR))
+		mw_ip_write32(mwchan->mwdev->mw_ip_info, mwchan->irq_ack_reg, val | mwchan->irq_ack_mask);
+	else
+		mw_ip_write32(mwchan->mwdev->mw_ip_info, mwchan->irq_ack_reg, val & ~mwchan->irq_ack_mask);
+
+	/* Second write: CLEAR after SET */
+	if (mwchan->irq_ack_op & MW_IRQ_ACK_SET_CLEAR)
+		mw_ip_write32(mwchan->mwdev->mw_ip_info, mwchan->irq_ack_reg, val & ~mwchan->irq_ack_mask);
+}
+
+/* Runs in thread context (requested via devm_request_threaded_irq below),
+ * so taking the channel mutex here is safe. */
+static irqreturn_t mw_sharedmem_irq_handler(int irq, void *data)
+{
+	struct iio_dev *indio_dev = data;
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	/* Ack the interrupt */
+	mw_sharedmem_irq_ack(mwchan);
+
+	mutex_lock(&mwchan->lock);
+	mwchan->irq_count++;
+	mutex_unlock(&mwchan->lock);
+
+	wake_up(&indio_dev->buffer->pollq);
+
+	return IRQ_HANDLED;
+}
+
+/*************
+ * IP Sync Modes
+ *************/
+static const char * const mw_sharedmem_iio_channel_ip_sync_modes[] = { "none", "interrupt" };
+
+static int mw_sharedmem_iio_channel_get_ip_sync_mode(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+	int mode;
+
+	mutex_lock(&indio_dev->mlock);
+	mode = mwchan->ip_sync_mode;
+	mutex_unlock(&indio_dev->mlock);
+
+	return mode;
+}
+
+static int mw_sharedmem_iio_channel_set_ip_sync_mode(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan, unsigned int mode)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	if ((mode == MW_SHAREDMEM_IP_SYNC_MODE_INTERRUPT) && (mwchan->irq < 0))
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+	mwchan->ip_sync_mode = mode;
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+static const struct iio_enum mw_sharedmem_iio_channel_ip_sync_mode_enum = {
+	.items = mw_sharedmem_iio_channel_ip_sync_modes,
+	.num_items = ARRAY_SIZE(mw_sharedmem_iio_channel_ip_sync_modes),
+	.get = mw_sharedmem_iio_channel_get_ip_sync_mode,
+	.set = mw_sharedmem_iio_channel_set_ip_sync_mode,
+};
+
+/*************
+ * IP Base Address Register Modes
+ *************/
+static const char * const mw_sharedmem_iio_channel_base_addr_modes[] = { "auto", "manual" };
+
+static int mw_sharedmem_iio_channel_get_base_addr_mode(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+	int mode;
+
+	mutex_lock(&indio_dev->mlock);
+	mode = mwchan->base_addr_mode;
+	mutex_unlock(&indio_dev->mlock);
+
+	return mode;
+}
+
+static int mw_sharedmem_iio_channel_set_base_addr_mode(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan, unsigned int mode)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	mwchan->base_addr_mode = mode;
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+static const struct iio_enum mw_sharedmem_iio_channel_base_addr_mode_enum = {
+	.items = mw_sharedmem_iio_channel_base_addr_modes,
+	.num_items = ARRAY_SIZE(mw_sharedmem_iio_channel_base_addr_modes),
+	.get = mw_sharedmem_iio_channel_get_base_addr_mode,
+	.set = mw_sharedmem_iio_channel_set_base_addr_mode,
+};
+
+static const struct iio_chan_spec_ext_info mw_sharedmem_iio_ch_ip_info[] = {
+	MW_SHAREDMEM_IIO_ENUM("base_addr_mode", IIO_SHARED_BY_ALL, &mw_sharedmem_iio_channel_base_addr_mode_enum),
+	MW_SHAREDMEM_IIO_ENUM_AVAILABLE("base_addr_mode", IIO_SHARED_BY_ALL, &mw_sharedmem_iio_channel_base_addr_mode_enum),
+	MW_SHAREDMEM_IIO_ENUM("ip_sync_mode", IIO_SHARED_BY_ALL, &mw_sharedmem_iio_channel_ip_sync_mode_enum),
+	MW_SHAREDMEM_IIO_ENUM_AVAILABLE("ip_sync_mode", IIO_SHARED_BY_ALL, &mw_sharedmem_iio_channel_ip_sync_mode_enum),
+	{ },
+};
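With ip_sync_mode set to "interrupt", data_available()/space_available() above gate on irq_count, so a reader can block in poll() until the IP core raises its interrupt and the handler wakes the buffer's poll queue. A user-space sketch, assuming the IIO buffer has already been sized and enabled through sysfs and that the device index is 0 (illustrative, not part of the patch):

/* Illustrative only: block until the IP core signals the shared memory
 * region is ready, then read one snapshot of it. */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	int fd = open("/dev/iio:device0", O_RDONLY);	/* example device */

	if (fd < 0)
		return 1;
	pfd.fd = fd;
	pfd.events = POLLIN;
	while (poll(&pfd, 1, -1) > 0) {		/* woken by the IRQ handler */
		if (read(fd, buf, sizeof(buf)) <= 0)
			break;
		/* ... consume one region snapshot ... */
	}
	close(fd);
	return 0;
}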
+
+/***************************
+ * IIO channel dev
+ ***************************/
+
+static void mw_sharedmem_config_ipcore_reg(struct mw_sharedmem_iio_chandev *mwchan)
+{
+	uint32_t base_addr32;
+
+	base_addr32 = (uint32_t)((size_t)mwchan->region->phys & 0xFFFFFFFF);
+
+	if (mwchan->mwregion->rd_base_reg >= 0) {
+		/* Set the IP core's AXI Master Read Base Address */
+		mw_ip_write32(mwchan->mwdev->mw_ip_info, mwchan->mwregion->rd_base_reg, base_addr32);
+	}
+	if (mwchan->mwregion->wr_base_reg >= 0) {
+		/* Set the IP core's AXI Master Write Base Address */
+		mw_ip_write32(mwchan->mwdev->mw_ip_info, mwchan->mwregion->wr_base_reg, base_addr32);
+	}
+}
+
+static void mw_sharedmem_iio_chan_ida_remove(void *opaque)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = opaque;
+
+	ida_simple_remove(&mw_sharedmem_iio_channel_ida, mwchan->dev.id);
+}
+
+static void mw_sharedmem_region_ida_remove(void *opaque)
+{
+	struct mw_sharedmem_region_dev *mwregion = opaque;
+
+	ida_simple_remove(&mw_sharedmem_region_ida, mwregion->dev.id);
+}
+
+static int mw_sharedmem_iio_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	if (mwchan->base_addr_mode == MW_SHAREDMEM_BASE_ADDR_MODE_AUTO)
+		mw_sharedmem_config_ipcore_reg(mwchan);
+
+	return 0;
+}
+
+static const struct iio_buffer_setup_ops mw_sharedmem_iio_buffer_setup_ops = {
+	.preenable = &mw_sharedmem_iio_buffer_preenable,
+};
+
+static const struct iio_info mw_sharedmem_iio_chandev_info;
+
+static int devm_mw_sharedmem_configure_buffer(struct iio_dev *indio_dev)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+	struct iio_buffer *buffer;
+	int status;
+
+	buffer = mw_sharedmem_buffer_alloc(indio_dev->dev.parent, mwchan);
+	if (IS_ERR(buffer)) {
+		dev_err(&indio_dev->dev, "Failed to configure IIO buffer: %ld\n", PTR_ERR(buffer));
+		return PTR_ERR(buffer);
+	}
+
+	status = devm_add_action(indio_dev->dev.parent, (devm_action_fn)mw_sharedmem_buffer_free, buffer);
+	if (status) {
+		mw_sharedmem_buffer_free(buffer);
+		return status;
+	}
+
+	iio_device_attach_buffer(indio_dev, buffer);
+
+	indio_dev->modes = INDIO_BUFFER_HARDWARE;
+	indio_dev->setup_ops = &mw_sharedmem_iio_buffer_setup_ops;
+
+	return 0;
+}
+
+static int devm_mw_sharedmem_configure_irq(struct iio_dev *indio_dev)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+	int status;
+
+	if (mwchan->irq <= 0)
+		return 0;
+
+	/* Use a threaded handler: the handler takes a mutex, which is not
+	 * allowed in hard interrupt context. */
+	status = devm_request_threaded_irq(&mwchan->dev, mwchan->irq, NULL,
+			mw_sharedmem_irq_handler, IRQF_ONESHOT,
+			dev_name(&mwchan->dev), indio_dev);
+	if (status) {
+		dev_err(&indio_dev->dev, "Failed to request IRQ %d\n", mwchan->irq);
+		return status;
+	}
+
+	return 0;
+}
+
+static int mw_sharedmem_setup_ip_channel(struct iio_dev *indio_dev, struct iio_chan_spec *channel)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	channel->type = IIO_GENERIC_DATA;
+	channel->indexed = 1;
+	channel->extend_name = devm_kstrdup(&mwchan->dev, "ip_info", GFP_KERNEL);
+	if (!channel->extend_name)
+		return -ENOMEM;
+	channel->ext_info = mw_sharedmem_iio_ch_ip_info;
+	channel->scan_index = -ENODEV;
+
+	return 0;
+}
+
+static int mw_sharedmem_setup_data_channel(struct iio_dev *indio_dev, struct iio_chan_spec *channel)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+	int status;
+	unsigned long *available_scan_masks;
+
+	channel->indexed = 1;
+	channel->type = IIO_GENERIC_DATA;
+	if (indio_dev->direction == IIO_DEVICE_DIRECTION_OUT)
+		channel->output = 1;
+	channel->channel = 0;
+	channel->scan_index = 0;
+	status = of_property_read_string(mwchan->dev.of_node, "mathworks,chan-name", &channel->extend_name);
+	if (status)
+		channel->extend_name = NULL;
+
+	/* Set scan type to unsigned byte */
+	channel->scan_type.sign = 'u';
+	channel->scan_type.storagebits = 8;
+	channel->scan_type.realbits = 8;
+	channel->scan_type.shift = 0;
+
+	available_scan_masks = devm_kzalloc(&mwchan->dev, sizeof(unsigned long) * 2, GFP_KERNEL);
+	if (!available_scan_masks)
+		return -ENOMEM;
+	available_scan_masks[0] = 1;
+	indio_dev->available_scan_masks = available_scan_masks;
+
+	return 0;
+}
+	mutex_lock(&mwchan->lock);
+	offset = (unsigned long long)mwchan->offset;
+	mutex_unlock(&mwchan->lock);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", offset);
+}
+
+static ssize_t mw_sharedmem_iio_channel_set_offset(struct iio_dev *indio_dev,
+	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, size_t len)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+	unsigned long long result;
+	ssize_t ret;
+
+	ret = kstrtoull(buf, 0, &result);
+	if (ret)
+		return ret;
+
+	if (result >= mwchan->region->size) {
+		return -EINVAL;
+	}
+
+	mutex_lock(&mwchan->lock);
+	mwchan->offset = (ssize_t)result;
+	mutex_unlock(&mwchan->lock);
+
+	return len;
+}
+
+static const struct iio_chan_spec_ext_info mw_sharedmem_iio_offset_channel_info[] = {
+	{
+		.name = "offset",
+		.shared = IIO_SHARED_BY_ALL,
+		.read = mw_sharedmem_iio_channel_get_offset,
+		.write = mw_sharedmem_iio_channel_set_offset,
+	},
+	{ }, /* ext_info arrays must end with a sentinel entry */
+};
+
+static int mw_sharedmem_setup_offset_channel(struct iio_dev *indio_dev, struct iio_chan_spec *channel) {
+
+	channel->type = IIO_GENERIC_DATA;
+	channel->indexed = 1;
+	channel->ext_info = mw_sharedmem_iio_offset_channel_info;
+	channel->scan_index = -ENODEV;
+
+	return 0;
+}
+
+static void mw_sharedmem_iio_unregister(void *opaque) {
+	struct device *dev = opaque;
+
+	/* Unregister the IIO device */
+	devres_release_group(dev, mw_sharedmem_iio_unregister);
+}
+
+static int devm_mw_sharedmem_iio_register(struct iio_dev *indio_dev, enum iio_device_direction direction) {
+	struct mw_sharedmem_iio_chandev *mwchan = iio_priv(indio_dev);
+	int status;
+
+	if(!devres_open_group(&mwchan->dev, mw_sharedmem_iio_unregister, GFP_KERNEL))
+		return -ENOMEM;
+
+	indio_dev->dev.parent = &mwchan->dev;
+	indio_dev->name = dev_name(&mwchan->dev);
+	indio_dev->info = &mw_sharedmem_iio_chandev_info;
+	indio_dev->direction = direction;
+	indio_dev->num_channels = 3; /* data, offset, ip */
+
+	indio_dev->channels = devm_kzalloc(&mwchan->dev, (indio_dev->num_channels) * sizeof(struct iio_chan_spec), GFP_KERNEL);
+	if(!indio_dev->channels)
+		return -ENOMEM;
+
+	status = mw_sharedmem_setup_data_channel(indio_dev, (struct iio_chan_spec *)&indio_dev->channels[0]);
+	if(status)
+		return status;
+
+	status = mw_sharedmem_setup_offset_channel(indio_dev, (struct iio_chan_spec *)&indio_dev->channels[1]);
+	if(status)
+		return status;
+
+	status = mw_sharedmem_setup_ip_channel(indio_dev, (struct iio_chan_spec *)&indio_dev->channels[2]);
+	if(status)
+		return status;
+
+	status = devm_mw_sharedmem_configure_buffer(indio_dev);
+	if (status){
+		return status;
+	}
+
+	status = devm_mw_sharedmem_configure_irq(indio_dev);
+	if (status){
+		return status;
+	}
+
+	status = devm_iio_device_register(&mwchan->dev, indio_dev);
+	if(status)
+		return status;
+
+	devres_close_group(&mwchan->dev, mw_sharedmem_iio_unregister);
+
+	/* Setup the parent device to tear us down on removal */
+	status = devm_add_action(mwchan->dev.parent, mw_sharedmem_iio_unregister, &mwchan->dev);
+	if(status){
+		mw_sharedmem_iio_unregister(&mwchan->dev);
+		return status;
+	}
+
+	return 0;
+}
+
+static void mw_sharedmem_iio_channel_release(struct device *dev)
+{
+	struct mw_sharedmem_iio_chandev *mwchan = (struct mw_sharedmem_iio_chandev *)dev->driver_data;
+	mutex_destroy(&mwchan->lock);
+	/* The IRQ was requested with devm_request_irq() against this device,
+	 * so the devres core has already released it by the time this release
+	 * callback runs; a manual free_irq() here would be a double free. */
+}
+
+static struct iio_dev *devm_mw_sharedmem_iio_alloc(
+	struct mw_sharedmem_region_dev *mwregion,
+	enum iio_device_direction direction,
+	struct device_node *node)
+{
+	struct iio_dev *indio_dev;
+	struct
mathworks_ipcore_dev *mwdev = mwregion->mwdev; + struct mw_sharedmem_iio_chandev *mwchan; + const char *devname; + int irq_ack_info[3]; + int status; + + if(!devres_open_group(IP2DEVP(mwdev), devm_mw_sharedmem_iio_alloc, GFP_KERNEL)) + return ERR_PTR(-ENOMEM); + + indio_dev = devm_iio_device_alloc(IP2DEVP(mwdev), sizeof(struct mw_sharedmem_iio_chandev)); + if (!indio_dev){ + dev_err(IP2DEVP(mwdev), "Failed to allocate memory for channel %s\n",node->name); + return ERR_PTR(-ENOMEM); + } + + mwchan = iio_priv(indio_dev); + mwchan->mwdev = mwdev; + mwchan->mwregion = mwregion; + if (direction == IIO_DEVICE_DIRECTION_OUT) { + mwchan->type = MW_SHAREDMEM_CHAN_TYPE_WRITE; + } else { + mwchan->type = MW_SHAREDMEM_CHAN_TYPE_READ; + } + mwchan->region = &mwregion->region; + mwchan->offset = 0; + mutex_init(&mwchan->lock); + + /* look for IRQ */ + mwchan->irq = of_irq_get(node, 0); + + if (mwchan->irq == 0) { + return ERR_PTR(-ENOENT); + } + if (mwchan->irq > 0) { + status = of_property_read_u32_array(node, "mathworks,irq-ack-info", &irq_ack_info[0], 3); + if(status) { + mwchan->irq_ack_reg = -EINVAL; + mwchan->irq_ack_mask = -EINVAL; + mwchan->irq_ack_op = -EINVAL; + } else { + mwchan->irq_ack_reg = irq_ack_info[0]; + mwchan->irq_ack_mask = irq_ack_info[1]; + mwchan->irq_ack_op = irq_ack_info[2]; + } + } + mwchan->irq_count = 0; + + mwchan->base_addr_mode = MW_SHAREDMEM_BASE_ADDR_MODE_MANUAL; + mwchan->ip_sync_mode = MW_SHAREDMEM_IP_SYNC_MODE_NONE; + + device_initialize(&mwchan->dev); + + mwchan->dev.parent = IP2DEVP(mwdev); + mwchan->dev.driver_data = (void*)mwchan; + mwchan->dev.of_node = node; + mwchan->dev.id = ida_simple_get(&mw_sharedmem_iio_channel_ida, 0, 0, GFP_KERNEL); + if (mwchan->dev.id < 0) { + return ERR_PTR(mwchan->dev.id); + } + status = devm_add_action(IP2DEVP(mwdev),mw_sharedmem_iio_chan_ida_remove, mwchan); + if(status){ + mw_sharedmem_iio_chan_ida_remove(mwchan); + return ERR_PTR(status); + } + mwchan->dev.release = mw_sharedmem_iio_channel_release; + /* clone the parent's DMA config */ + memcpy(&mwchan->dev.archdata, &IP2DEVP(mwdev)->archdata, sizeof(struct dev_archdata)); + mwchan->dev.coherent_dma_mask = IP2DEVP(mwdev)->coherent_dma_mask; + mwchan->dev.dma_mask = IP2DEVP(mwdev)->dma_mask; + + status = of_property_read_string(node, "mathworks,dev-name", &devname); + if (!status) { + /* Use the specified channel name */ + status = dev_set_name(&mwchan->dev, "%s:%s", mwregion->name, devname); + } else { + /* Use the node name + dev ID */ + status = dev_set_name(&mwchan->dev, "%s:%s%d", mwregion->name, node->name, mwchan->dev.id); + } + if (status) + return ERR_PTR(status); + + status = device_add(&mwchan->dev); + if (status) + return ERR_PTR(status); + + status = devm_add_action(IP2DEVP(mwdev), (devm_action_fn)device_unregister, &mwchan->dev); + if (status) { + device_unregister(&mwchan->dev); + return ERR_PTR(status); + } + + devres_close_group(IP2DEVP(mwdev), devm_mw_sharedmem_iio_alloc); + + return indio_dev; +} + +static int mw_sharedmem_iio_channel_probe( + struct mw_sharedmem_region_dev *mwregion, + struct device_node *node, + enum iio_device_direction direction) +{ + int status; + struct iio_dev *indio_dev; + + indio_dev = devm_mw_sharedmem_iio_alloc(mwregion, direction, node); + if (IS_ERR(indio_dev)) + return PTR_ERR(indio_dev); + + status = devm_mw_sharedmem_iio_register(indio_dev, direction); + if (status) + return status; + + return 0; +} + +static struct mw_sharedmem_iio_channel_info mw_sharedmem_iio_write_info = { + .iio_direction = IIO_DEVICE_DIRECTION_OUT, +}; + 
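+
+/*
+ * The write/read info structs here only select the IIO direction for the
+ * compatibles in the match table below. An illustrative (hypothetical)
+ * device-tree fragment, using only the properties parsed in this file:
+ *
+ *	sharedmem0 {
+ *		compatible = "mathworks,sharedmem-v1.00";
+ *		memory-region = <&reserved_mem>;
+ *		mathworks,rd-base-reg = <0x100>;
+ *		mathworks,wr-base-reg = <0x104>;
+ *		wr { compatible = "mathworks,sharedmem-write-channel-v1.00"; };
+ *		rd { compatible = "mathworks,sharedmem-read-channel-v1.00"; };
+ *	};
+ */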
+static struct mw_sharedmem_iio_channel_info mw_sharedmem_iio_read_info = {
+	.iio_direction = IIO_DEVICE_DIRECTION_IN,
+};
+
+static const struct of_device_id mw_sharedmem_iio_channel_of_match[] = {
+	{ .compatible = "mathworks,sharedmem-write-channel-v1.00", .data = &mw_sharedmem_iio_write_info},
+	{ .compatible = "mathworks,sharedmem-read-channel-v1.00", .data = &mw_sharedmem_iio_read_info},
+	{},
+};
+
+static int mw_sharedmem_count_iio_channels(struct device_node *node) {
+	struct device_node *child;
+	const struct of_device_id *match;
+	int count = 0;
+	for_each_child_of_node(node, child) {
+		match = of_match_node(mw_sharedmem_iio_channel_of_match, child);
+		if(match)
+			count++;
+	}
+	return count;
+}
+
+/***************************
+ * Memory region dev
+ ***************************/
+
+static int mw_sharedmem_region_init(struct mw_sharedmem_region *region, struct resource *r)
+{
+	region->phys = (phys_addr_t)r->start;
+	region->size = (size_t)resource_size(r);
+	region->virt = memremap(region->phys, region->size, MEMREMAP_WC);
+	if (region->virt == NULL) {
+		return -ENOMEM;
+	}
+	region->read_count = 0;
+	mutex_init(&region->read_count_lock);
+	mutex_init(&region->lock);
+	return 0;
+}
+
+static void mw_sharedmem_region_release(struct device *dev)
+{
+	struct mw_sharedmem_region_dev *mwregion = (struct mw_sharedmem_region_dev *)dev->driver_data;
+	mutex_destroy(&mwregion->region.lock);
+}
+
+static struct mw_sharedmem_region_dev *devm_mw_sharedmem_region_alloc(
+	struct mathworks_ipcore_dev *mwdev,
+	struct device_node *node)
+{
+	struct mw_sharedmem_region_dev *mwregion;
+	struct device_node *np;
+	struct resource r;
+	const char *devname;
+	int status;
+
+	if(!devres_open_group(IP2DEVP(mwdev), devm_mw_sharedmem_region_alloc, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+
+	/* Find reserved memory region node */
+	np = of_parse_phandle(node, "memory-region", 0);
+	if (!np) {
+		dev_err(IP2DEVP(mwdev), "Missing memory-region property for node: %s\n", node->name);
+		return ERR_PTR(-ENODEV);
+	}
+
+	/* Get the address assigned to the memory region */
+	status = of_address_to_resource(np, 0, &r);
+	if (status) {
+		dev_err(IP2DEVP(mwdev), "No memory address assigned to region: %s\n",np->name);
+		return ERR_PTR(status);
+	}
+
+	mwregion = devm_kzalloc(IP2DEVP(mwdev),sizeof(*mwregion), GFP_KERNEL);
+	if (!mwregion) {
+		dev_err(IP2DEVP(mwdev), "Failed to allocate memory for shared memory region %s\n",np->name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	status = mw_sharedmem_region_init(&mwregion->region, &r);
+	if (status) {
+		dev_err(IP2DEVP(mwdev), "Failed to initialize shared memory region\n");
+		return ERR_PTR(status);
+	}
+
+	mwregion->mwdev = mwdev;
+
+	status = of_property_read_u32(node, "mathworks,rd-base-reg", &mwregion->rd_base_reg);
+	if(status)
+		mwregion->rd_base_reg = -EINVAL;
+
+	status = of_property_read_u32(node, "mathworks,wr-base-reg", &mwregion->wr_base_reg);
+	if(status)
+		mwregion->wr_base_reg = -EINVAL;
+
+	device_initialize(&mwregion->dev);
+
+	mwregion->dev.parent = IP2DEVP(mwdev);
+	mwregion->dev.driver_data = (void*)mwregion;
+	mwregion->dev.of_node = node;
+	mwregion->dev.id = ida_simple_get(&mw_sharedmem_region_ida, 0, 0, GFP_KERNEL);
+	if (mwregion->dev.id < 0) {
+		return ERR_PTR(mwregion->dev.id);
+	}
+	status = devm_add_action(IP2DEVP(mwdev),mw_sharedmem_region_ida_remove, mwregion);
+	if(status){
+		mw_sharedmem_region_ida_remove(mwregion);
+		return ERR_PTR(status);
+	}
+	mwregion->dev.release = mw_sharedmem_region_release;
+	/* clone the parent's DMA config */
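+	/* Copying archdata and both DMA masks makes DMA mappings created
+	 * against this child device behave exactly as on the parent. */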
+	memcpy(&mwregion->dev.archdata, &IP2DEVP(mwdev)->archdata, sizeof(struct dev_archdata));
+	mwregion->dev.coherent_dma_mask = IP2DEVP(mwdev)->coherent_dma_mask;
+	mwregion->dev.dma_mask = IP2DEVP(mwdev)->dma_mask;
+
+	status = of_property_read_string(node, "mathworks,dev-name", &devname);
+	if (!status) {
+		/* Use the specified device name */
+		status = dev_set_name(&mwregion->dev, "%s:%s", dev_name(mwregion->mwdev->mw_ip_info->char_device), devname);
+	} else {
+		/* Use the node name + dev ID */
+		status = dev_set_name(&mwregion->dev, "%s:%s%d", dev_name(mwregion->mwdev->mw_ip_info->char_device), node->name, mwregion->dev.id);
+	}
+	if (status)
+		return ERR_PTR(status);
+
+	mwregion->name = dev_name(&mwregion->dev);
+
+	status = device_add(&mwregion->dev);
+	if (status)
+		return ERR_PTR(status);
+
+	status = devm_add_action(IP2DEVP(mwdev), (devm_action_fn)device_unregister, &mwregion->dev);
+	if (status) {
+		device_unregister(&mwregion->dev);
+		return ERR_PTR(status);
+	}
+
+	devres_close_group(IP2DEVP(mwdev), devm_mw_sharedmem_region_alloc);
+
+	return mwregion;
+}
+
+static int mw_sharedmem_iio_probe(
+	struct mathworks_ipcore_dev *mwdev,
+	struct device_node *node)
+{
+	struct mw_sharedmem_region_dev *mwregion;
+	struct device_node *child;
+	const struct of_device_id *match;
+	struct mw_sharedmem_iio_channel_info *info;
+	char size_fmt[] = "MB";
+	size_t size_disp;
+	int status;
+
+	/* At least one read/write channel child node must exist */
+	if (!mw_sharedmem_count_iio_channels(node)) {
+		dev_err(IP2DEVP(mwdev), "No read/write channels found for node: %s\n",node->name);
+		return -EINVAL;
+	}
+
+	mwregion = devm_mw_sharedmem_region_alloc(mwdev, node);
+	if (IS_ERR(mwregion)) {
+		dev_err(IP2DEVP(mwdev), "Failed to configure shared memory region: %ld\n", PTR_ERR(mwregion));
+		return PTR_ERR(mwregion);
+	} else {
+		/* If region size is >= 1 MB, display size as MB */
+		if (mwregion->region.size >= (1 << 20)){
+			size_disp = mwregion->region.size >> 20;
+		}
+		/* Otherwise display as kB */
+		else {
+			size_disp = mwregion->region.size >> 10;
+			size_fmt[0] = 'k';
+		}
+		dev_info(IP2DEVP(mwdev), "Allocated reserved memory, virt: 0x%0zX, phys: 0x%0zX, size: %zd %s\n",
+			(size_t)mwregion->region.virt, (size_t)mwregion->region.phys, size_disp, size_fmt);
+	}
+
+	/* Probe the read/write channels */
+	for_each_child_of_node(node,child) {
+		match = of_match_node(mw_sharedmem_iio_channel_of_match, child);
+		if(match){
+			info = (struct mw_sharedmem_iio_channel_info *)match->data;
+			status = mw_sharedmem_iio_channel_probe(mwregion, child, info->iio_direction);
+			if(status)
+				return status;
+		}
+	}
+
+	return 0;
+}
+
+static const struct of_device_id mw_sharedmem_iio_of_match[] = {
+	{ .compatible = "mathworks,sharedmem-v1.00"},
+	{},
+};
+
+int mw_sharedmem_iio_channels_probe(struct mathworks_ipcore_dev *mwdev)
+{
+	int status;
+	struct device_node *child;
+	const struct of_device_id *match;
+
+	for_each_child_of_node(IP2DEVP(mwdev)->of_node,child) {
+		match = of_match_node(mw_sharedmem_iio_of_match, child);
+		if(match){
+			status = mw_sharedmem_iio_probe(mwdev, child);
+			if(status)
+				return status;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(mw_sharedmem_iio_channels_probe);
+
+static int __init mw_sharedmem_iio_channel_init(void)
+{
+	return 0;
+}
+
+static void __exit mw_sharedmem_iio_channel_exit(void)
+{
+
+}
+
+module_init(mw_sharedmem_iio_channel_init);
+module_exit(mw_sharedmem_iio_channel_exit);
+
+MODULE_AUTHOR("MathWorks, Inc");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MathWorks Shared
Memory IIO Channel"); +MODULE_ALIAS(DRIVER_NAME); diff --git a/drivers/misc/mathworks/mw_sharedmem_iio_channel.h b/drivers/misc/mathworks/mw_sharedmem_iio_channel.h new file mode 100755 index 0000000000000..0451bc397ed1f --- /dev/null +++ b/drivers/misc/mathworks/mw_sharedmem_iio_channel.h @@ -0,0 +1,25 @@ +/* + * MathWorks Shared Memory Channel + * + * Copyright 2019 The MathWorks, Inc + * + * Licensed under the GPL-2. + */ + +#ifndef _MW_SHAREDMEM_IIO_CHANNEL_H_ +#define _MW_SHAREDMEM_IIO_CHANNEL_H_ + +#include "mathworks_ipcore.h" + +/********************************************************* +* API functions +*********************************************************/ +#if defined(CONFIG_MWIPCORE_IIO_SHAREDMEM) || defined(CONFIG_MWIPCORE_IIO_SHAREDMEM_MODULE) +extern int mw_sharedmem_iio_channels_probe(struct mathworks_ipcore_dev *mwdev); +#else +static inline int mw_sharedmem_iio_channels_probe(struct mathworks_ipcore_dev *mwdev) { + return -ENODEV; +} +#endif + +#endif /* _MW_SHAREDMEM_IIO_CHANNEL_H_ */ diff --git a/drivers/misc/mathworks/mw_stream_channel.c b/drivers/misc/mathworks/mw_stream_channel.c new file mode 100755 index 0000000000000..b20dc0a04dbcb --- /dev/null +++ b/drivers/misc/mathworks/mw_stream_channel.c @@ -0,0 +1,1256 @@ +/* + * MathWorks Streaming Channel + * + * Copyright 2016 The MathWorks, Inc + * + * Licensed under the GPL-2. + */ + +#include +#include + +#include "mw_stream_channel.h" +#include "mathworks_ipcore.h" + +#define STREAMDEV_TO_MWCHAN(dev_ptr) (container_of(dev_ptr, struct mwadma_chan, dev)) +#define MWDEV_TO_MWIP(mwdev) (mwdev->mw_ipcore_dev->mw_ip_info) +#define IP2DEVP(mwdev) (MWDEV_TO_MWIP(mwdev)->dev) +#define IP2DEV(mwdev) (*IP2DEVP(mwdev)) + +static DEFINE_IDA(mw_stream_channel_ida); +static atomic64_t rxcount = ATOMIC64_INIT(0); +static LIST_HEAD(mwadma_rx_userid); +/* + * Forward declaration of functions + */ +/*************************************************************************/ +static int mw_axidma_setupchannel(struct mwadma_dev *mwdev, + struct mwadma_chan *mwchan, + struct mw_axidma_params *usrbuf); + +static int mw_axidma_alloc(struct mwadma_dev *mwdev, size_t bufferSize); +static int mwadma_mmap(struct file *fp, struct vm_area_struct *vma); + +static void mwadma_free_channel(struct mwadma_dev *mwdev, + struct mwadma_chan *mwchan); + +static void mwdma_test_loopback(struct mwadma_dev * mwdev, + struct mw_axidma_params chn_prm); + +int mwadma_start(struct mwadma_chan *mwchan); +/* + * @brief mwadma_fasync_impl + */ +static int mwadma_fasync_impl(int fd, struct file* fp, int mode) +{ + struct mwadma_dev *mwdev = fp->private_data; + return fasync_helper(fd, fp, mode, &mwdev->asyncq); +} + +/* + * @brief mwadma_open + */ +static int mwadma_open(struct inode *inode, struct file *fp) +{ + struct mathworks_ip_info *mw_ip_info; + struct mathworks_ipcore_dev *mw_ipcore_dev; + struct mwadma_dev *mwdev; + if (inode == NULL) + { + MW_DBG_text("INODE is NULL\n"); + } + mw_ip_info = container_of(inode->i_cdev, struct mathworks_ip_info, cdev); + mw_ipcore_dev = (struct mathworks_ipcore_dev *)mw_ip_info->private; + mwdev = (struct mwadma_dev *)mw_ipcore_dev->private; + fp->private_data = mwdev; + + return 0; +} + +/* + * @brief mwadma_allocate_desc + */ +static int mwadma_allocate_desc(struct mwadma_slist **new, struct mwadma_chan *mwchan, unsigned int this_idx) +{ + struct mwadma_slist *tmp; + size_t ring_bytes; + + ring_bytes = mwchan->length/mwchan->ring_total; + tmp = devm_kmalloc(&mwchan->dev, sizeof(struct mwadma_slist),GFP_KERNEL); + if 
(!tmp) { + return -ENOMEM; + } + /* set buffer at offset from larger buffer */ + tmp->phys = mwchan->phys+this_idx*ring_bytes; + tmp->length = ring_bytes; + tmp->buffer_index = this_idx; + tmp->state = MWDMA_READY; + tmp->qchan = mwchan; + INIT_LIST_HEAD(&(tmp->userid)); + dev_dbg(&mwchan->dev,"buf_phys_addr 0x%08lx, size %zu\n", (unsigned long) tmp->phys, tmp->length); + *new = tmp; + return 0; +} + +/* + * @brief mwadma_free_desc + */ +static void mwadma_free_desc(struct mwadma_slist *desc, struct mwadma_chan *mwchan) { + devm_kfree(&mwchan->dev, desc); +} + +/* + * @brief mwadma_prep_desc + */ +static int mwadma_prep_desc(struct mwadma_dev *mwdev, struct mwadma_chan * mwchan) +{ + unsigned int i = 0; + int ret; + struct mwadma_slist *new; + struct mwadma_slist *b; + struct mwadma_slist **blocks; + + blocks = devm_kmalloc(&mwchan->dev, sizeof(*blocks)*mwchan->ring_total, GFP_KERNEL); + if (!blocks) { + return -ENOMEM; + } + + ret = mwadma_allocate_desc(&(mwchan->scatter), mwchan, 0); + if (ret) { + dev_err(&mwchan->dev, "Failed in mwadma_allocate_desc"); + return -ENOMEM; + } + INIT_LIST_HEAD(&(mwchan->scatter->list)); + for(i = 1; i < mwchan->ring_total; i++) /* POOL_SIZE - 1 */ + { + ret = mwadma_allocate_desc(&(new), mwchan, i); + if ((ret < 0) || (new == NULL)) { + dev_err(&mwchan->dev, "Failed in mwadma_allocate_desc"); + return -ENOMEM; + } + list_add_tail(&(new->list),&(mwchan->scatter->list)); + } + mwchan->curr = mwchan->scatter; /*Head of the list*/ + mwchan->prev = list_entry(mwchan->curr->list.prev, struct mwadma_slist, list); + blocks[0] = mwchan->scatter; + i = 1; + list_for_each_entry(b, &(mwchan->scatter->list), list){ + blocks[i] = b; + i++; + } + mwchan->blocks = blocks; + mwchan->status = ready; + return 0; +} + +void mwadma_tx_cb_single_signal(void *data) +{ + struct mwadma_slist *block = data; + struct mwadma_chan *mwchan = block->qchan; + unsigned long flags; + + spin_lock_irqsave(&mwchan->slock, flags); + mwchan->blocks[block->buffer_index]->state = MWDMA_READY; + mwchan->transfer_queued--; + mwchan->transfer_count++; + mwchan->status = ready; + spin_unlock_irqrestore(&mwchan->slock, flags); + + sysfs_notify_dirent(mwchan->irq_kn); +} + +void mwadma_tx_cb_continuous_signal_dataflow(void *data) +{ + struct mwadma_slist *block = data; + struct mwadma_chan *mwchan = block->qchan; + unsigned char start_next = 0; + unsigned long flags; + + spin_lock_irqsave(&mwchan->slock, flags); + mwchan->blocks[block->buffer_index]->state = MWDMA_READY; + mwchan->transfer_queued--; + if(mwchan->transfer_queued > 0) { + if(mwchan->transfer_queued > TX_WATERMARK_QFULL) /* High watermark */ { + mwchan->error = TX_ERROR_QFULL; + } else if(mwchan->transfer_queued > TX_WATERMARK_QPRIME) /* Normal */ { + mwchan->error = TX_ERROR_QPRIME; + } else if(mwchan->transfer_queued >= TX_WATERMARK_QLOW) /* Low */ { + mwchan->error = TX_ERROR_QLOW; + } + } + else /* Underflow */ { + mwchan->error = TX_ERROR_QUNDERFLOW; + mwchan->status = waiting; + } + mwchan->transfer_count++; + spin_unlock_irqrestore(&mwchan->slock, flags); + sysfs_notify_dirent(mwchan->irq_kn); + if (start_next) { + mwadma_start(mwchan); + } +} + + +void mwadma_tx_cb_continuous_signal(void *data) +{ + struct mwadma_slist *block= data; + struct mwadma_chan *mwchan = block->qchan; + unsigned long int flags; + spin_lock_irqsave(&mwchan->slock, flags); + mwchan->transfer_queued--; + mwchan->transfer_count++; + mwchan->blocks[block->buffer_index]->state = MWDMA_READY; + spin_unlock_irqrestore(&mwchan->slock, flags); + 
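+	/* Wake any poll()ers on the dma_irq sysfs node, then immediately
+	 * queue the next ring so continuous transmission keeps flowing. */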
+	sysfs_notify_dirent(mwchan->irq_kn);
+	mwadma_start(mwchan);
+}
+
+
+void mwadma_rx_cb_single_signal(void *data)
+{
+	struct mwadma_slist *block = data;
+	struct mwadma_chan *mwchan = block->qchan;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mwchan->slock, flags);
+	mwchan->transfer_queued--;
+	mwchan->transfer_count++;
+	list_add_tail(&(block->userid), &mwadma_rx_userid);
+	mwchan->blocks[block->buffer_index]->state = MWDMA_PENDING;
+	mwchan->status = ready;
+	spin_unlock_irqrestore(&mwchan->slock, flags);
+
+	sysfs_notify_dirent(mwchan->irq_kn);
+}
+
+void mwadma_rx_cb_burst(void *data)
+{
+	struct mwadma_slist *block = data;
+	struct mwadma_chan *mwchan = block->qchan;
+	unsigned long flags;
+	unsigned char start_next = 0;
+
+	sysfs_notify_dirent(mwchan->irq_kn);
+	spin_lock_irqsave(&mwchan->slock, flags);
+	mwchan->transfer_queued--;
+	mwchan->transfer_count++;
+	if (mwchan->transfer_queued) {
+		start_next = 1;
+	} else {
+		mwchan->next_index = block->buffer_index;
+	}
+	mwchan->blocks[block->buffer_index]->state = MWDMA_PENDING;
+	list_add_tail(&(block->userid), &mwadma_rx_userid);
+	spin_unlock_irqrestore(&mwchan->slock, flags);
+	if(start_next) {
+		mwadma_start(mwchan);
+	}
+}
+
+void mwadma_rx_cb_continuous_signal(void *data)
+{
+	struct mwadma_slist *block = data;
+	struct mwadma_chan *mwchan = block->qchan;
+	unsigned long flags;
+	unsigned int next_idx, start_next = 0;
+
+	spin_lock_irqsave(&mwchan->slock, flags);
+	mwchan->blocks[block->buffer_index]->state = MWDMA_PENDING;
+	list_add_tail(&(block->userid), &mwadma_rx_userid);
+	mwchan->transfer_count++;
+	mwchan->transfer_queued++;
+	next_idx = (block->buffer_index + 1) % mwchan->ring_total;
+	if (mwchan->blocks[next_idx]->state == MWDMA_PENDING) {
+		mwchan->error = ERR_RING_OVERFLOW;
+		start_next = 0;
+	} else {
+		start_next = 1;
+	}
+	spin_unlock_irqrestore(&mwchan->slock, flags);
+	atomic64_inc(&rxcount);
+	sysfs_notify_dirent(mwchan->irq_kn);
+	if (start_next) {
+		mwadma_start(mwchan);
+	}
+}
+/*
+ * @brief mwadma_start
+ */
+int mwadma_start(struct mwadma_chan *mwchan)
+{
+	int ret = 0;
+	struct mwadma_slist *newList;
+	struct dma_async_tx_descriptor *thisDesc;
+	dma_cookie_t ck;
+	unsigned long flags;
+	if (NULL == mwchan) {
+		pr_err("mw-axidma: channel queue pointer is NULL\n");
+		ret = -ENODEV;
+		goto start_failed;
+	}
+	if (mwchan->curr->state == MWDMA_PENDING) {
+		return -ENOMEM;
+	}
+	thisDesc = dmaengine_prep_slave_single(mwchan->chan, mwchan->curr->phys, mwchan->curr->length, mwchan->direction, mwchan->flags);
+	if (NULL == thisDesc) {
+		dev_err(&mwchan->dev,"prep_slave_single failed: buf_phys_addr 0x%08lx, size %zu\n", (unsigned long) mwchan->curr->phys, mwchan->curr->length);
+		ret = -ENOMEM;
+		goto start_failed;
+	}
+	thisDesc->callback = mwchan->callback;
+	thisDesc->callback_param = mwchan->curr;
+	mwchan->curr->desc = thisDesc;
+	mwchan->blocks[mwchan->curr->buffer_index]->state = MWDMA_ACTIVE;
+	spin_lock_irqsave(&mwchan->slock, flags);
+	newList = list_entry(mwchan->curr->list.next, struct mwadma_slist, list);
+	mwchan->prev = mwchan->curr;
+	mwchan->curr = newList;
+	mwchan->status = running;
+	spin_unlock_irqrestore(&mwchan->slock, flags);
+	ck = dmaengine_submit(thisDesc);
+	if (dma_submit_error(ck)) {
+		dev_err(&mwchan->dev, "Failure in dmaengine_submit!\n");
+		ret = -ENOSYS;
+		goto start_failed;
+	}
+	dma_async_issue_pending(mwchan->chan);
+start_failed:
+	return ret;
+}
+
+/*
+ * @brief mwadma_stop
+ */
+static int mwadma_stop(struct mwadma_chan *mwchan)
+{
+
dev_dbg(&mwchan->dev,"DMAENGINE_TERMINATE\n"); + dmaengine_terminate_all(mwchan->chan); + return 0; +} + +/* + * @brief mwadma_rx_ctl + */ +static long mwadma_rx_ctl(struct mwadma_dev *mwdev, unsigned int cmd, unsigned long arg) +{ + int ret = 0; + unsigned long userval; + struct mw_axidma_params usrbuf; + struct mwadma_chan *mwchan = mwdev->rx; + enum mwadma_chan_status status; + unsigned int next_index, done_index; + unsigned int error; + unsigned long int flags; + struct mwadma_slist *tmp = NULL; + switch(cmd) + { + case MWADMA_SETUP_RX_CHANNEL: + if(copy_from_user(&usrbuf, (struct mw_axidma_params *)arg, sizeof(struct mw_axidma_params))) + { + return -EACCES; + } + if (mwchan == NULL) + { + dev_err(IP2DEVP(mwdev),"Invalid Memory\n"); + return -ENOMEM; + } + ret = mw_axidma_setupchannel(mwdev, mwchan, &usrbuf); + break; + case MWADMA_RX_SINGLE: + if(copy_from_user(&userval, (unsigned long *)arg, sizeof(userval))) { + return -EACCES; + } + mwchan->callback = (dma_async_tx_callback)mwadma_rx_cb_single_signal; + spin_lock_bh(&mwchan->slock); + mwchan->error = 0; + mwchan->transfer_count = 0; + spin_unlock_bh(&mwchan->slock); + ret = mwadma_start(mwchan); + break; + case MWADMA_RX_BURST: + if(copy_from_user(&userval, (unsigned long *)arg, sizeof(userval))) + { + return -EACCES; + } + mwchan->callback = (dma_async_tx_callback)mwadma_rx_cb_burst; + /* Start from the first */ + if(userval > mwchan->ring_total) + { + return -EINVAL; + } + spin_lock_bh(&mwchan->slock); + mwchan->transfer_queued = userval; + mwchan->transfer_count = 0; + mwchan->error = 0; + spin_unlock_bh(&mwchan->slock); + dev_dbg(IP2DEVP(mwdev), "Start DMA Burst of size %lu\n", userval); + ret = mwadma_start(mwchan); + spin_lock_bh(&mwchan->slock); + mwchan->status = running; + spin_unlock_bh(&mwchan->slock); + break; + case MWADMA_RX_CONTINUOUS: + if(copy_from_user(&userval, (unsigned long *)arg, sizeof(userval))) { + return -EACCES; + } + mwchan->callback = (dma_async_tx_callback)mwadma_rx_cb_continuous_signal; + ret = mwadma_start(mwchan); + break; + case MWADMA_RX_STOP: + spin_lock_bh(&mwchan->slock); + status = (unsigned long) mwchan->status; + spin_unlock_bh(&mwchan->slock); + if(status != ready) { + ret = mwadma_stop(mwchan); + if (ret) { + dev_err(IP2DEVP(mwdev),"Error while stopping DMA\n"); + return ret; + } + } + spin_lock_bh(&mwchan->slock); + mwchan->transfer_queued = 0; + mwchan->transfer_count = 0; + mwchan->status = ready; + spin_unlock_bh(&mwchan->slock); + INIT_LIST_HEAD(&mwadma_rx_userid); + atomic64_set(&rxcount, 0LL); + break; + case MWADMA_RX_GET_NEXT_INDEX: + spin_lock_irqsave(&mwchan->slock, flags); + if (!list_empty(&mwadma_rx_userid)) { + tmp = list_entry(mwadma_rx_userid.next, struct mwadma_slist, userid); + next_index = tmp->buffer_index; + list_del_init(mwadma_rx_userid.next); + } + spin_unlock_irqrestore(&mwchan->slock, flags); + if (NULL == tmp) { + return -ENOMEM; + } + if(copy_to_user((unsigned int *) arg, &next_index, sizeof(next_index))) { + return -EACCES; + } + break; + case MWADMA_RX_GET_ERROR: + if(copy_from_user(&done_index, (unsigned int *)arg, sizeof(done_index))) { + return -EACCES; + } + spin_lock_irqsave(&mwchan->slock, flags); + mwchan->transfer_queued--; + error = mwchan->error; + mwchan->error = 0; + mwchan->blocks[done_index]->state = MWDMA_READY; + spin_unlock_irqrestore(&mwchan->slock, flags); + atomic64_dec_if_positive(&rxcount); + if(copy_to_user((unsigned int *) arg, &error, sizeof(error))) { + return -EACCES; + } + break; + case MWADMA_FREE_RX_CHANNEL: + 
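+		/* Tears down the descriptor ring and terminates any DMA
+		 * still in flight on this channel. */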
mwadma_free_channel(mwdev, mwchan); + break; + default: + return 1; + } + return ret; +} + + +/* + * @brief mwadma_tx_ctl + */ +static long mwadma_tx_ctl(struct mwadma_dev *mwdev, unsigned int cmd, unsigned long arg) +{ + int ret = 0; + struct mwadma_chan *mwchan = mwdev->tx; + unsigned long userval; + enum mwadma_chan_status status; + long int transfer_queued; + struct mw_axidma_params usrbuf; + switch(cmd) + { + case MWADMA_SETUP_TX_CHANNEL: + if(copy_from_user(&usrbuf, (struct mw_axidma_params *)arg, sizeof(struct mw_axidma_params))) + { + return -EACCES; + } + if (mwchan == NULL) + { + return -ENOMEM; + } + ret = mw_axidma_setupchannel(mwdev, mwchan, &usrbuf); + break; + + case MWADMA_TX_ENQUEUE: + if(copy_from_user(&userval, (unsigned long *)arg, sizeof(userval))) { + return -EACCES; + } + spin_lock_bh(&mwchan->slock); + transfer_queued = mwchan->transfer_queued; + status = mwchan->status; + spin_unlock_bh(&mwchan->slock); + if((status == ready) && (0 == transfer_queued )) { + spin_lock_bh(&mwchan->slock); + mwchan->next_index = mwchan->curr->buffer_index; + spin_unlock_bh(&mwchan->slock); + } + else + { + mwchan->next_index = (mwchan->next_index + 1) % mwchan->ring_total; + } + if(transfer_queued >= mwchan->ring_total) { + dev_err(IP2DEVP(mwdev), \ + ":queue:%lu, user-queue:%lu, ring:%u\n", \ + mwchan->transfer_queued, \ + userval, \ + mwchan->ring_total); + spin_lock_bh(&mwchan->slock); + mwchan->error = TX_ERROR_QFULL; + spin_unlock_bh(&mwchan->slock); + return 0; + } + spin_lock_bh(&mwchan->slock); + mwchan->transfer_queued += (long)userval; + transfer_queued = mwchan->transfer_queued; + spin_unlock_bh(&mwchan->slock); + if(unlikely((status == waiting) && (transfer_queued >= TX_WATERMARK_QPRIME))) /* restart if required */ + { + dev_dbg(IP2DEVP(mwdev),"Fill level reached = %ld\n", transfer_queued); + mwadma_start(mwchan); + spin_lock_bh(&mwchan->slock); + mwchan->status = running; /*Data ready */ + spin_unlock_bh(&mwchan->slock); + mwadma_start(mwchan); + } + break; + case MWADMA_TX_SINGLE: + if(copy_from_user(&userval, (unsigned long *)arg, sizeof(userval))) { + return -EACCES; + } + switch(userval) + { + case SIGNAL_TRANSFER_COMPLETE: + mwchan->callback = (dma_async_tx_callback)mwadma_tx_cb_single_signal; + break; + default: + mwchan->callback = (dma_async_tx_callback)mwadma_tx_cb_single_signal; + } + spin_lock_bh(&mwchan->slock); + transfer_queued = mwchan->transfer_queued; + spin_unlock_bh(&mwchan->slock); + if (!transfer_queued) { + dev_err(IP2DEVP(mwdev),"Queue is empty\n"); + return -EINVAL; + } + spin_lock_bh(&mwchan->slock); + mwchan->next_index = (mwchan->next_index + 1) % mwchan->ring_total; + spin_unlock_bh(&mwchan->slock); + mwadma_start(mwchan); + break; + case MWADMA_TX_CONTINUOUS: + if(copy_from_user(&userval, (unsigned long *)arg, sizeof(userval))) + { + return -EACCES; + } + spin_lock(&mwchan->slock); + switch(userval) + { + case SIGNAL_TRANSFER_COMPLETE: + mwchan->callback = (dma_async_tx_callback)mwadma_tx_cb_continuous_signal; + break; + case SIGNAL_DATAFLOW: + mwchan->callback = (dma_async_tx_callback)mwadma_tx_cb_continuous_signal_dataflow; + break; + default: + mwchan->callback = (dma_async_tx_callback)mwadma_tx_cb_continuous_signal; + } + mwchan->status = waiting; /* Wait on queued data */ + mwchan->next_index = (mwchan->next_index + 1) % mwchan->ring_total; + spin_unlock(&mwchan->slock);/*!!!UNLOCK!!!*/ + break; + case MWADMA_TX_STOP: + spin_lock(&mwchan->slock);/*!!!LOCK!!!*/ + if(mwchan->status == running) + { + + ret = mwadma_stop(mwchan); + if (ret) + 
{ + dev_err(IP2DEVP(mwdev),"Error while stopping DMA\n"); + spin_unlock(&mwchan->slock);/*!!!UNLOCK-EXIT!!!*/ + return ret; + } + mwchan->status = ready; + } + mwchan->transfer_queued = 0; /* Reset pending transfers */ + spin_unlock(&mwchan->slock); + break; + case MWADMA_TX_GET_ERROR: + dev_dbg(IP2DEVP(mwdev), "Requested Tx error status = %d\n",mwchan->error); + + spin_lock_bh(&mwchan->slock); + userval = mwchan->error; + /*mwchan->error = 0;*/ + spin_unlock_bh(&mwchan->slock); + + if(copy_to_user((unsigned long *) arg, &userval, sizeof(unsigned long))) + { + return -EACCES; + } + break; + case MWADMA_TX_GET_NEXT_INDEX: + spin_lock_bh(&mwchan->slock); + userval = (unsigned long) mwchan->next_index; + spin_unlock_bh(&mwchan->slock); + if(copy_to_user((unsigned long *) arg, &userval, sizeof(unsigned long))) + { + return -EACCES; + } + break; + case MWADMA_FREE_TX_CHANNEL: + mwadma_free_channel(mwdev, mwchan); + break; + default: + return 1; + } + return 0; +} + +/* + * @brief mwadma_generic_ctl + */ +static long mwadma_generic_ctl(struct mwadma_dev *mwdev, unsigned int cmd, unsigned long arg) +{ + struct mw_axidma_params usrbuf; + switch(cmd) + { + case MWADMA_GET_PROPERTIES: + usrbuf.size = MWDEV_TO_MWIP(mwdev)->dma_info.size; + usrbuf.phys = (dma_addr_t)MWDEV_TO_MWIP(mwdev)->dma_info.phys; + if(copy_to_user((struct mw_axidma_params *)arg, &usrbuf, sizeof(struct mw_axidma_params))) { + return -EACCES; + } + break; + case MWADMA_TEST_LOOPBACK: + if(copy_from_user(&usrbuf, (struct mw_axidma_params *)arg, sizeof(struct mw_axidma_params))) + { + return -EACCES; + } + mwdma_test_loopback(mwdev, usrbuf); + break; + default: + return 1; + } + return 0; +} + +/* + * @brief mwadma_ioctl + */ +static long mwadma_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + int ret_rx = 0; + int ret_tx = 0; + int ret_generic = 0; + struct mwadma_dev *mwdev = fp->private_data; + + if (NULL == mwdev) + { + return -ENODEV; + } + + ret_rx = mwadma_rx_ctl(mwdev,cmd,arg); + ret_tx = mwadma_tx_ctl(mwdev,cmd,arg); + ret_generic = mwadma_generic_ctl(mwdev,cmd,arg); + /* Errors */ + if(ret_rx < 0) + { + return ret_rx; + } + if(ret_tx < 0) + { + return ret_tx; + } + if(ret_generic < 0) + { + return ret_generic; + } + + /* No valid case found */ + if(3 == (ret_rx + ret_tx + ret_generic)) + { + dev_dbg(IP2DEVP(mwdev), "Invalid ioctl: command: %u\n", cmd); + return -EINVAL; + } + return 0; +} + + +/* + * @brief mwadma_close + */ +static int mwadma_close(struct inode *inode, struct file *fp) +{ + struct mwadma_dev *mwdev = fp->private_data; + int ret = 0; + + if (NULL == mwdev) { + return -ENODEV; + } + dev_dbg(IP2DEVP(mwdev),"Closing the file-descriptor\n"); + mwadma_fasync_impl(-1, fp, 0); + return ret; +} + +/* + * @brief mwadma_mmap_dma_open + */ +static void mwadma_mmap_dma_open(struct vm_area_struct *vma) +{ + struct mwadma_dev * mwdev = vma->vm_private_data; + dev_info(IP2DEVP(mwdev), "DMA VMA open, virt %lx, phys %lx \n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT); +} + +/* + * @brief mwadma_free_channel + */ +static void mwadma_free_channel(struct mwadma_dev *mwdev, struct mwadma_chan *mwchan) +{ + struct mwadma_slist *curr, *next; + spin_lock_bh(&mwchan->slock); + list_for_each_entry_safe(curr, next, &(mwchan->scatter->list), list) { + list_del(&curr->list); + mwadma_free_desc(curr, mwchan); + } + mwadma_free_desc(mwchan->scatter, mwchan); + spin_unlock_bh(&mwchan->slock); + dmaengine_terminate_all(mwchan->chan); + dev_dbg(IP2DEVP(mwdev), "MWADMA Free channel done."); +} + +/* + * @brief 
mwadma_mmap_dma_close + */ +static void mwadma_mmap_dma_close(struct vm_area_struct *vma) +{ + struct mwadma_dev * mwdev = vma->vm_private_data; + dev_info(IP2DEVP(mwdev), "DMA VMA close.\n"); + /* Free the memory DMA */ + if (MWDEV_TO_MWIP(mwdev)->dma_info.size) { + dev_info(IP2DEVP(mwdev), "free dma memory.\n"); + dmam_free_coherent(IP2DEVP(mwdev), MWDEV_TO_MWIP(mwdev)->dma_info.size, MWDEV_TO_MWIP(mwdev)->dma_info.virt, MWDEV_TO_MWIP(mwdev)->dma_info.phys); + MWDEV_TO_MWIP(mwdev)->dma_info.size = 0; + mwdev->channel_offset = 0; + MWDEV_TO_MWIP(mwdev)->dma_info.virt = NULL; + MWDEV_TO_MWIP(mwdev)->dma_info.phys = 0; + atomic64_set(&rxcount, 0LL); + } +} + +/* + * @brief mwadma_mmap_open + */ +static void mwadma_mmap_open(struct vm_area_struct *vma) +{ + struct mwadma_dev * mwdev = vma->vm_private_data; + dev_info(IP2DEVP(mwdev), "Simple VMA open, virt %lx, phys %lx \n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT); +} + +/* + * @brief mwadma_mmap_close + */ +static void mwadma_mmap_close(struct vm_area_struct *vma) +{ + struct mwadma_dev * mwdev = vma->vm_private_data; + dev_info(&IP2DEV(mwdev ), "Simple VMA close.\n"); +} + +/* + * @brief mwadma_mmap_fault + */ +static vm_fault_t mwadma_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct mwadma_dev * mwdev = vma->vm_private_data; + struct page *thisPage; + unsigned long offset; + offset = (vmf->pgoff - vma->vm_pgoff) << PAGE_SHIFT; + thisPage = virt_to_page(MWDEV_TO_MWIP(mwdev)->mem->start + offset); + get_page(thisPage); + vmf->page = thisPage; + return 0; +} + +struct vm_operations_struct mwadma_mmap_ops = { + .open = mwadma_mmap_open, + .close = mwadma_mmap_close, + .fault = mwadma_mmap_fault, +}; + +struct vm_operations_struct mwadma_mmap_dma_ops = { + .open = mwadma_mmap_dma_open, + .close = mwadma_mmap_dma_close, +}; + + +struct file_operations mwadma_cdev_fops = { + .owner = THIS_MODULE, + .open = mwadma_open, + .fasync = mwadma_fasync_impl, + .release = mwadma_close, + .mmap = mwadma_mmap, + .unlocked_ioctl = mwadma_ioctl, +}; + +/* + * @brief mwadma_mmap + */ +static int mwadma_mmap(struct file *fp, struct vm_area_struct *vma) +{ + struct mwadma_dev *mwdev = fp->private_data; + size_t size = vma->vm_end - vma->vm_start; + int status = 0; + vma->vm_private_data = mwdev; + dev_info(IP2DEVP(mwdev), "[MMAP] size:%X pgoff: %lx\n", (unsigned int)size, vma->vm_pgoff); + + switch(vma->vm_pgoff) { + case 0: + /* mmap the Memory Mapped I/O's base address */ + vma->vm_flags |= VM_IO | VM_DONTDUMP; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (remap_pfn_range(vma, vma->vm_start, + MWDEV_TO_MWIP(mwdev)->mem->start >> PAGE_SHIFT, + size, + vma->vm_page_prot)) + { + return -EAGAIN; + } + vma->vm_ops = &mwadma_mmap_ops; + break; + default: + /* mmap the DMA region */ + status = mw_axidma_alloc(mwdev, size); + if ((status) && (status != -EEXIST)) { + return -ENOMEM; + } + dev_dbg(IP2DEVP(mwdev), "dma setup_cdev successful\n"); + + status = 0; + if (MWDEV_TO_MWIP(mwdev)->dma_info.virt == NULL){ + return -EINVAL; + } + vma->vm_pgoff = 0; + status = dma_mmap_coherent(IP2DEVP(mwdev), vma, MWDEV_TO_MWIP(mwdev)->dma_info.virt, + MWDEV_TO_MWIP(mwdev)->dma_info.phys, MWDEV_TO_MWIP(mwdev)->dma_info.size); + if (status) { + dev_dbg(IP2DEVP(mwdev),"Remapping memory failed, error: %d\n", status); + return status; + } + vma->vm_ops = &mwadma_mmap_dma_ops; + dev_dbg(IP2DEVP(mwdev),"%s: mapped dma addr 0x%08lx at 0x%08lx, size %u\n", + __func__, (unsigned long)MWDEV_TO_MWIP(mwdev)->dma_info.phys, 
vma->vm_start,
+		(unsigned int)MWDEV_TO_MWIP(mwdev)->dma_info.size);
+		break;
+	}
+	return status;
+}
+
+
+/*
+ * @brief mw_axidma_alloc
+ */
+static int mw_axidma_alloc(struct mwadma_dev *mwdev, size_t bufferSize)
+{
+	if (mwdev == NULL)
+	{
+		return -ENOMEM;
+	}
+	if (MWDEV_TO_MWIP(mwdev)->dma_info.virt != NULL)
+	{
+		dev_err(IP2DEVP(mwdev), "DMA memory already allocated\n");
+		return -EEXIST;
+	}
+	MWDEV_TO_MWIP(mwdev)->dma_info.virt = dmam_alloc_coherent(IP2DEVP(mwdev), bufferSize, \
+			&MWDEV_TO_MWIP(mwdev)->dma_info.phys, \
+			GFP_KERNEL);
+	if (MWDEV_TO_MWIP(mwdev)->dma_info.virt == NULL)
+	{
+		dev_err(IP2DEVP(mwdev), "Failed to allocate contiguous memory, using multiple buffers\n");
+	}
+	else {
+		dev_info(IP2DEVP(mwdev), "Address of buffer = 0x%p, Length = %u Bytes\n",\
+				(void *)((uintptr_t)MWDEV_TO_MWIP(mwdev)->dma_info.phys),
+				(unsigned int)bufferSize);
+		MWDEV_TO_MWIP(mwdev)->dma_info.size = bufferSize;
+	}
+	return 0;
+}
+
+/*
+ * @brief mw_axidma_setupchannel
+ */
+static int mw_axidma_setupchannel(struct mwadma_dev *mwdev,
+		struct mwadma_chan *mwchan,
+		struct mw_axidma_params *usrbuf)
+{
+	int status = 0;
+	static int idx = 0;
+	char *buf;
+	dma_addr_t phys;
+	if ( (mwdev == NULL) || (mwchan == NULL) ) {
+		return -EINVAL;
+	}
+	mwchan->flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	mwchan->ring_total = usrbuf->total_rings;
+	mwchan->length = usrbuf->bytes_per_ring * usrbuf->total_rings;
+	mwchan->bd_bytes = usrbuf->bytes_per_ring;
+
+	/* reset the pcore */
+	mw_ip_reset(mwdev->mw_ipcore_dev);
+	/* Write IPCore_PacketSize_AXI4_Stream_Master (offset 0x8) to specify the length */
+	mw_ip_write32(MWDEV_TO_MWIP(mwdev), 0x8, usrbuf->counter);
+	if (MWDEV_TO_MWIP(mwdev)->dma_info.virt == NULL) {
+		dev_err(IP2DEVP(mwdev), "Buffer is NULL. 
Failed to allocate memory\n"); + return -ENOMEM; + } + buf = MWDEV_TO_MWIP(mwdev)->dma_info.virt; + phys = MWDEV_TO_MWIP(mwdev)->dma_info.phys; + mwchan->buf = &(buf[mwdev->channel_offset]); + + mwchan->phys = (phys + mwdev->channel_offset); + mwchan->offset = mwdev->channel_offset; + mwdev->channel_offset = mwdev->channel_offset + mwchan->length; + /* + * Set channel-index : used to notify appropriate DMA_CHX SYFS node + */ + mwchan->chan_id = idx; + idx++; + dev_dbg(IP2DEVP(mwdev), "### Printing Channel info...\n"); + dev_dbg(IP2DEVP(mwdev), "Virtual Address :0x%p\n", mwchan->buf); + dev_dbg(IP2DEVP(mwdev), "Channel Length/Size :%lu\n", mwchan->length); + dev_dbg(IP2DEVP(mwdev), "Channel direction :%d\n", mwchan->direction); + dev_dbg(IP2DEVP(mwdev), "Total number of rings :%d\n", mwchan->ring_total); + dev_dbg(IP2DEVP(mwdev), "Buffer Descriptor size :%d\n", mwchan->bd_bytes); + /* Get channel for DMA */ + mutex_init(&mwchan->lock); + + dev_dbg(IP2DEVP(mwdev),"Name:%s, mwchan:0x%p, mwchan->chan:0x%p\n", + dma_chan_name(mwchan->chan), mwchan, mwchan->chan); + status = mwadma_prep_desc(mwdev, mwchan); + init_completion(&mwchan->dma_complete); + spin_lock_init(&mwchan->slock); + mwchan->transfer_queued = 0; + return status; +} + +static void mwdma_test_loopback(struct mwadma_dev * mwdev, + struct mw_axidma_params chan_prm) +{ + int i = 0; + size_t len; + char *dma_addr = MWDEV_TO_MWIP(mwdev)->dma_info.virt; + unsigned int *tmp; + /* rx = &dma_addr[0]; + * tx = &dma_addr[chan_prm.size]; + */ + dev_dbg(IP2DEVP(mwdev),"### test loopback\n"); + + len = chan_prm.size; + /* prime the rx & tx buffers */ + tmp = (unsigned int *) dma_addr; + for (i=0;i<(len/sizeof(unsigned int));i++) + { + tmp[i] = 0xDEADC0DE; + } + tmp = (unsigned int *) (dma_addr + len); + for (i=0;i<(len/sizeof(unsigned int));i++) + { + tmp[i] = (i+1) % (chan_prm.bytes_per_ring/sizeof(unsigned int)); + } + /* Receive single ring */ + mwdev->rx->callback = (dma_async_tx_callback)mwadma_rx_cb_single_signal; + mwdev->rx->error = 0; + mwdev->rx->transfer_count = 0; + mwadma_start(mwdev->rx); + /* Transmit single ring */ + mwdev->tx->transfer_queued += 1; + mwdev->tx->callback = (dma_async_tx_callback)mwadma_tx_cb_single_signal; + mwdev->tx->next_index = (mwdev->tx->next_index + 1) % mwdev->tx->ring_total; + mwadma_start(mwdev->tx); +} + + +static ssize_t mwdma_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) +{ + dev_dbg(dev,"sysfs_notify :%s\n", attr->attr.name); + return (sizeof(int)); +} + +static ssize_t mwdma_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + dev_dbg(dev, "sysfs_read :%s\n",attr->attr.name); + return sprintf(buf, "%s\n", attr->attr.name); +} + +static DEVICE_ATTR(dma_ch1, S_IRUGO, mwdma_show, mwdma_store); +static DEVICE_ATTR(dma_ch2, S_IRUGO, mwdma_show, mwdma_store); +static DEVICE_ATTR(dma_ch3, S_IRUGO, mwdma_show, mwdma_store); +static DEVICE_ATTR(dma_ch4, S_IRUGO, mwdma_show, mwdma_store); +static DEVICE_ATTR(dma_ch5, S_IRUGO, mwdma_show, mwdma_store); +static DEVICE_ATTR(dma_ch6, S_IRUGO, mwdma_show, mwdma_store); +static DEVICE_ATTR(dma_ch7, S_IRUGO, mwdma_show, mwdma_store); +static DEVICE_ATTR(dma_ch8, S_IRUGO, mwdma_show, mwdma_store); + +static struct attribute *mwdma_attributes[] = { + &dev_attr_dma_ch1.attr, + &dev_attr_dma_ch2.attr, + &dev_attr_dma_ch3.attr, + &dev_attr_dma_ch4.attr, + &dev_attr_dma_ch5.attr, + &dev_attr_dma_ch6.attr, + &dev_attr_dma_ch7.attr, + &dev_attr_dma_ch8.attr, + NULL, +}; + +static const struct attribute_group 
mwdma_attr_group = { + .attrs = mwdma_attributes, +}; + +static void mwadma_get_devname(struct mathworks_ip_info *mw_ip_info,char *devname){ + snprintf(devname,MATHWORKS_IP_DEVNAME_LEN, "%s", mw_ip_info->name); +} + +static struct mathworks_ip_ops mwadma_ip_ops = { + .get_devname = mwadma_get_devname, + .get_param = NULL, + .fops = &mwadma_cdev_fops, +}; + +struct mathworks_ip_ops* mw_stream_channel_get_ops(void) { + return &mwadma_ip_ops; +} + +EXPORT_SYMBOL_GPL(mw_stream_channel_get_ops); + +/******************************** + * Channel Sysfs + ********************************/ + +static ssize_t mw_stream_chan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) +{ + dev_dbg(dev,"sysfs_notify:%s\n", attr->attr.name); + return (sizeof(int)); +} + +static ssize_t mw_stream_chan_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%llu\n", + (unsigned long long)atomic64_read(&rxcount)); +} + +static DEVICE_ATTR(dma_irq, S_IRUGO, mw_stream_chan_show, mw_stream_chan_store); + +static struct attribute *mw_stream_channel_attributes[] = { + &dev_attr_dma_irq.attr, + NULL, +}; + +static const struct attribute_group mw_stream_chan_group = { + .attrs = mw_stream_channel_attributes, +}; + +static const struct attribute_group *mw_stream_chan_groups[] = { + &mw_stream_chan_group, + NULL +}; + +/******************************** + * Channel IDA + ********************************/ + +static void mw_stream_chan_ida_remove(void *opaque){ + struct mwadma_chan* mwchan = opaque; + ida_simple_remove(&mw_stream_channel_ida, mwchan->dev.id); +} + +/* Nothing to actually do upon release */ +static void mw_stream_chan_release(struct device *dev) +{ + struct mwadma_chan* mwchan = STREAMDEV_TO_MWCHAN(dev); + dev_dbg(dev, "Freeing scatter channel dma memory\n"); + if ( (mwchan->scatter !=NULL) && (&mwchan->scatter->list != NULL)) + { + mwadma_free_channel(mwchan->mwdev, mwchan); + } +} + +static struct mwadma_chan* __must_check mw_stream_chan_probe( + struct mwadma_dev *mwdev, + enum dma_transfer_direction direction, + const char *name) +{ + struct dma_chan *chan = NULL; + int status; + struct mwadma_chan* mwchan; + void* resID; + + resID = devres_open_group(IP2DEVP(mwdev), NULL, GFP_KERNEL); + if(!resID) + return ERR_PTR(-ENOMEM); + + chan = dma_request_chan(IP2DEVP(mwdev), name); + if(IS_ERR(chan)){ + if (PTR_ERR(chan) == -EPROBE_DEFER) { + dev_info(IP2DEVP(mwdev), "Deferring probe for channel %s\n", name); + } else { + dev_err(IP2DEVP(mwdev), "Could not find DMA channel %s\n", name); + } + return (void *)chan; + } + /* Create the cleanup action */ + status = devm_add_action(IP2DEVP(mwdev), (devm_action_fn)dma_release_channel, chan); + if(status){ + dma_release_channel(chan); + return ERR_PTR(status); + } + mwchan = (struct mwadma_chan*)devm_kzalloc(IP2DEVP(mwdev), + sizeof(struct mwadma_chan),GFP_KERNEL); + if(!mwchan){ + dev_err(IP2DEVP(mwdev), "Failed to allocate memory for channel %s\n", name); + return ERR_PTR(-ENOMEM); + } + mwchan->mwdev = mwdev; + mwchan->chan = chan; + mwchan->direction = direction; + + device_initialize(&mwchan->dev); + mwchan->dev.parent = IP2DEVP(mwdev); + mwchan->dev.of_node = chan->dev->device.of_node; + mwchan->dev.groups = mw_stream_chan_groups; + mwchan->dev.id = ida_simple_get(&mw_stream_channel_ida, 0, 0, GFP_KERNEL); + mwchan->dev.release = mw_stream_chan_release; + if (mwchan->dev.id < 0) { + return ERR_PTR(mwchan->dev.id); + } + status = devm_add_action(IP2DEVP(mwdev),mw_stream_chan_ida_remove, mwchan); 
+ if(status){ + ida_simple_remove(&mw_stream_channel_ida, mwchan->dev.id); + return ERR_PTR(status); + } + + status = dev_set_name(&mwchan->dev, "%s:%s", dev_name(MWDEV_TO_MWIP(mwdev)->char_device), name); + if (status) + return ERR_PTR(status); + + status = device_add(&mwchan->dev); + if (status) + return ERR_PTR(status); + status = devm_add_action(IP2DEVP(mwdev), (devm_action_fn)device_unregister, &mwchan->dev); + if(status){ + device_unregister(&mwchan->dev); + return ERR_PTR(status); + } + devres_close_group(IP2DEVP(mwdev), resID); + + mwchan->irq_kn = sysfs_get_dirent(mwchan->dev.kobj.sd, "dma_irq"); + if(!mwchan->irq_kn){ + return ERR_PTR(-ENODEV); + } + status = devm_add_action(&mwchan->dev, (devm_action_fn)sysfs_put, mwchan->irq_kn); + if(status) { + sysfs_put(mwchan->irq_kn); + return ERR_PTR(status); + } + return mwchan; +} + +void mw_stream_channels_release(void *opaque) { + struct mwadma_dev *mwdev = opaque; + dev_info(IP2DEVP(mwdev), "Removing sysfs entries..."); + sysfs_remove_group(&IP2DEVP(mwdev)->kobj, &mwdma_attr_group); +} + +int mw_stream_channels_probe(struct mathworks_ipcore_dev *mw_ipcore_dev) { + struct mwadma_dev *mwdev; + struct device *dev = mw_ipcore_dev->mw_ip_info->dev; + int nchan; + int status; + + mwdev = (struct mwadma_dev*)devm_kzalloc(dev, sizeof(struct mwadma_dev),GFP_KERNEL); + if (!mwdev) { + dev_err(dev, "Failed to allocate memory for device context\n"); + return -ENOMEM; + } + + mwdev->mw_ipcore_dev = mw_ipcore_dev; + mw_ipcore_dev->private = (void*)mwdev; + + nchan = of_property_count_strings(dev->of_node, "dma-names"); + if (nchan == -EINVAL){ + dev_dbg(IP2DEVP(mwdev), "DMA Channels not found in device tree\n"); + return 0; + } + if (nchan < 0) { + dev_err(IP2DEVP(mwdev), "Invalid dma-names specification. Incorrect dma-names property.\n"); + return nchan; + } + + mwdev->tx = mw_stream_chan_probe(mwdev, DMA_MEM_TO_DEV, "mm2s"); + if (IS_ERR(mwdev->tx) && (PTR_ERR(mwdev->tx) == -EPROBE_DEFER)) + return PTR_ERR(mwdev->tx); + mwdev->rx = mw_stream_chan_probe(mwdev, DMA_DEV_TO_MEM, "s2mm"); + if (IS_ERR(mwdev->rx) && (PTR_ERR(mwdev->rx) == -EPROBE_DEFER)) + return PTR_ERR(mwdev->rx); + + if (nchan < 2) { + if(IS_ERR(mwdev->tx) && IS_ERR(mwdev->rx)) { + dev_err(IP2DEVP(mwdev),"MM2S/S2MM not found for nchan=%d\n",nchan); + return PTR_ERR(mwdev->tx); + } + } else { + if (IS_ERR(mwdev->tx)) { + dev_err(IP2DEVP(mwdev),"MM2S not found for nchan=%d\n",nchan); + return PTR_ERR(mwdev->tx); + } + if (IS_ERR(mwdev->rx)) { + dev_err(IP2DEVP(mwdev),"S2MM not found for nchan=%d\n",nchan); + return PTR_ERR(mwdev->rx); + } + } + status = sysfs_create_group(&dev->kobj, &mwdma_attr_group); + if (status) { + dev_err(IP2DEVP(mwdev), "Error creating the sysfs devices\n"); + return status; + } + status = devm_add_action(dev, mw_stream_channels_release, mwdev); + if(status) { + mw_stream_channels_release(mwdev); + return status; + } + + return 0; +} + +EXPORT_SYMBOL_GPL(mw_stream_channels_probe); + +static int __init mw_stream_channel_init(void) +{ + return 0; +} + +static void __exit mw_stream_channel_exit(void) +{ + +} + +module_init(mw_stream_channel_init); +module_exit(mw_stream_channel_exit); + +MODULE_AUTHOR("MathWorks, Inc"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MathWorks Streaming DMA Channel"); +MODULE_ALIAS(DRIVER_NAME); diff --git a/drivers/misc/mathworks/mw_stream_channel.h b/drivers/misc/mathworks/mw_stream_channel.h new file mode 100755 index 0000000000000..f51bcb984796e --- /dev/null +++ b/drivers/misc/mathworks/mw_stream_channel.h @@ -0,0 +1,120 @@ 
+/*
+ * MathWorks Streaming Channel
+ *
+ * Copyright 2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef _MW_STREAM_CHANNEL_H_
+#define _MW_STREAM_CHANNEL_H_
+
+#include
+#include
+#include
+
+#include "mathworks_ipcore.h"
+#include "mwadma_ioctl.h" /* IOCTL */
+
+enum DESCRIPTOR_STATUS {
+	BD_UNALLOC = -1,
+	BD_ALLOC = 0,
+	BD_MAPPED,
+	BD_ISSUED,
+	BD_PROCESS,
+	BD_PROCESSED,
+	BD_MAX_DESCRIPTOR_STATUS
+};
+
+
+#define ERR_RING_OVERFLOW 0x1
+
+enum mwadma_chan_status {
+	ready = 0x0, /* default state on init and reset */
+	running = 0x1,
+	waiting = 0x2 /* waiting on data for tx */
+};
+
+enum mwadma_buffer_block_state {
+	MWDMA_ACTIVE = 0x1,
+	MWDMA_PENDING = 0x2,
+	MWDMA_READY = 0x3
+};
+
+// BLOCK
+struct mwadma_slist {
+	struct list_head list;
+	struct list_head userid;
+	dma_addr_t phys;
+	size_t length;
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+	unsigned int buffer_index;
+	enum mwadma_buffer_block_state state;
+	struct mwadma_chan *qchan;
+};
+
+/* structure contains common parameters for rx/tx.
+ * Not all params are sensible for both
+ */
+struct mwadma_chan;
+
+struct mwadma_dev {
+	struct fasync_struct *asyncq;
+	struct mathworks_ipcore_dev *mw_ipcore_dev;
+	/* Transmit & Receive Channels */
+	struct mwadma_chan *rx;
+	struct mwadma_chan *tx;
+	unsigned int channel_offset;
+};
+// QUEUE
+struct mwadma_chan {
+	struct device dev;
+	struct mwadma_dev *mwdev;
+	struct kernfs_node *irq_kn;
+	spinlock_t slock;
+	struct mutex lock;
+	struct dma_chan *chan;
+	int chan_id;
+	size_t offset;
+	enum dma_ctrl_flags flags;
+	enum dma_transfer_direction direction;
+	dma_async_tx_callback callback;
+	char *buf;
+	dma_addr_t phys;
+	enum mwadma_chan_status status;
+	unsigned long length;
+	unsigned long transfer_count;
+	long transfer_queued;
+	struct mwadma_slist *scatter;
+	struct mwadma_slist *curr;
+	struct mwadma_slist *completed;
+	struct mwadma_slist *prev;
+	struct completion dma_complete;
+	struct tasklet_struct tasklet;
+	unsigned int next_index;
+	unsigned int error;
+	ktime_t start;
+	ktime_t stop;
+	unsigned int ring_total;
+	unsigned int bd_bytes;
+	struct mwadma_slist **blocks;
+};
+
+
+/*********************************************************
+* API functions
+*********************************************************/
+#if defined(CONFIG_MWIPCORE_DMA_STREAMING) || defined(CONFIG_MWIPCORE_DMA_STREAMING_MODULE)
+extern struct mathworks_ip_ops* mw_stream_channel_get_ops(void);
+extern int mw_stream_channels_probe(struct mathworks_ipcore_dev *mw_ipcore_dev);
+#else
+static inline struct mathworks_ip_ops* mw_stream_channel_get_ops(void) {
+	return NULL;
+}
+static inline int mw_stream_channels_probe(struct mathworks_ipcore_dev *mw_ipcore_dev) {
+	return -ENODEV;
+}
+#endif
+
+#endif /* _MW_STREAM_CHANNEL_H_ */
diff --git a/drivers/misc/mathworks/mw_stream_iio_channel.c b/drivers/misc/mathworks/mw_stream_iio_channel.c
new file mode 100755
index 0000000000000..10c10926d0059
--- /dev/null
+++ b/drivers/misc/mathworks/mw_stream_iio_channel.c
@@ -0,0 +1,679 @@
+/*
+ * MathWorks Streaming Channel
+ *
+ * Copyright 2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "mw_stream_iio_channel.h" +#include "mathworks_ipcore.h" + +static DEFINE_IDA(mw_stream_iio_channel_ida); + +#define MWDEV_TO_MWIP(mwdev) (mwdev->mw_ip_info) +#define IP2DEVP(mwdev) (MWDEV_TO_MWIP(mwdev)->dev) + +#define MW_STREAM_IIO_ENUM IIO_ENUM +#define MW_STREAM_IIO_ENUM_AVAILABLE(_name, _shared_by, _e) \ +{ \ + .name = (_name "_available"), \ + .shared = (_shared_by), \ + .read = iio_enum_available_read, \ + .private = (uintptr_t)(_e), \ +} + +struct mw_stream_iio_channel_info { + enum iio_device_direction iio_direction; +}; + +enum mw_stream_iio_tlast_mode { + MW_STREAM_TLAST_MODE_AUTO = 0, + MW_STREAM_TLAST_MODE_USER_LOGIC, +}; + +enum mw_stream_iio_reset_tlast_mode { + MW_STREAM_TLAST_MODE_PREBUFFER = 0, + MW_STREAM_TLAST_MODE_NEVER, +}; + +enum mw_stream_iio_reset_ip_mode { + MW_STREAM_RESET_IP_MODE_NONE = 0, + MW_STREAM_RESET_IP_MODE_ENABLE, + MW_STREAM_RESET_IP_MODE_DISABLE, + MW_STREAM_RESET_IP_MODE_ALL, +}; + +struct mw_stream_iio_chandev { + struct mathworks_ipcore_dev *mwdev; + struct device dev; + enum iio_device_direction iio_direction; + const char *dmaname; + enum mw_stream_iio_tlast_mode tlast_mode; + enum mw_stream_iio_reset_tlast_mode reset_tlast_mode; + enum mw_stream_iio_reset_ip_mode reset_ip_mode; + int tlast_cntr_addr; + int num_data_chan; +}; + +static void mw_stream_iio_chan_ida_remove(void *opaque){ + struct mw_stream_iio_chandev* mwchan = opaque; + ida_simple_remove(&mw_stream_iio_channel_ida, mwchan->dev.id); +} + +static int mw_stream_iio_buffer_submit_block(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) +{ + struct iio_dev *indio_dev = queue->driver_data; + struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev); + int direction; + + if(mwchan->iio_direction == IIO_DEVICE_DIRECTION_IN) { + direction = DMA_FROM_DEVICE; + } else { + direction = DMA_TO_DEVICE; + } + + return iio_dmaengine_buffer_submit_block(queue, block, direction); +} + + +static int mw_stream_iio_buffer_preenable(struct iio_dev *indio_dev) +{ + struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev); + + dev_dbg(&mwchan->dev, "buffer preenable\n"); + + switch(mwchan->reset_ip_mode) { + case MW_STREAM_RESET_IP_MODE_ENABLE: + case MW_STREAM_RESET_IP_MODE_ALL: + /* reset the ip core */ + dev_dbg(&mwchan->dev, "resetting IP Core\n"); + mw_ip_reset(mwchan->mwdev); + break; + default: + /* Do Nothing */ + break; + } + if (mwchan->tlast_cntr_addr >= 0 && mwchan->tlast_mode == MW_STREAM_TLAST_MODE_AUTO) { + if(mwchan->reset_tlast_mode == MW_STREAM_TLAST_MODE_PREBUFFER) { + /* reset the IP core (TODO: only reset the TLAST register)*/ + mw_ip_reset(mwchan->mwdev); + } + /* Set the TLAST count */ + mw_ip_write32(mwchan->mwdev->mw_ip_info, mwchan->tlast_cntr_addr, indio_dev->buffer->length); + } + + return 0; +} +static int mw_stream_iio_buffer_postenable(struct iio_dev *indio_dev) +{ + struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev); + + dev_dbg(&mwchan->dev, "buffer postenable\n"); + return 0; +} + +static int mw_stream_iio_buffer_predisable(struct iio_dev *indio_dev) +{ + struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev); + + dev_dbg(&mwchan->dev, "buffer predisable\n"); + + switch(mwchan->reset_ip_mode) { + case MW_STREAM_RESET_IP_MODE_DISABLE: + case MW_STREAM_RESET_IP_MODE_ALL: + /* reset the ip core */ + dev_dbg(&mwchan->dev, "resetting IP Core\n"); + + mw_ip_reset(mwchan->mwdev); + break; + default: + /* Do Nothing 
+
+static int mw_stream_iio_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	dev_dbg(&mwchan->dev, "buffer postdisable\n");
+	return 0;
+}
+
+static const struct iio_buffer_setup_ops mw_stream_iio_buffer_setup_ops = {
+	.preenable = &mw_stream_iio_buffer_preenable,
+	.postenable = &mw_stream_iio_buffer_postenable,
+	.predisable = &mw_stream_iio_buffer_predisable,
+	.postdisable = &mw_stream_iio_buffer_postdisable,
+};
+
+static const struct iio_dma_buffer_ops mw_stream_iio_buffer_dma_buffer_ops = {
+	.submit = mw_stream_iio_buffer_submit_block,
+	.abort = iio_dmaengine_buffer_abort,
+};
+
+/*************
+ * Reset IP Modes
+ *************/
+static const char * const mw_stream_iio_channel_reset_ip_modes[] = { "none", "enable", "disable", "all" };
+
+static int mw_stream_iio_channel_get_reset_ip_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	return mwchan->reset_ip_mode;
+}
+
+static int mw_stream_iio_channel_set_reset_ip_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int mode)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	mwchan->reset_ip_mode = mode;
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+static const struct iio_enum mw_stream_iio_channel_reset_ip_mode_enum = {
+	.items = mw_stream_iio_channel_reset_ip_modes,
+	.num_items = ARRAY_SIZE(mw_stream_iio_channel_reset_ip_modes),
+	.get = mw_stream_iio_channel_get_reset_ip_mode,
+	.set = mw_stream_iio_channel_set_reset_ip_mode,
+};
+
+/*************
+ * Reset TLAST Modes
+ *************/
+static const char * const mw_stream_iio_channel_reset_tlast_modes[] = { "prebuffer", "never" };
+
+static int mw_stream_iio_channel_get_reset_tlast_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	return mwchan->reset_tlast_mode;
+}
+
+static int mw_stream_iio_channel_set_reset_tlast_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int mode)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	mwchan->reset_tlast_mode = mode;
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+static const struct iio_enum mw_stream_iio_channel_reset_tlast_mode_enum = {
+	.items = mw_stream_iio_channel_reset_tlast_modes,
+	.num_items = ARRAY_SIZE(mw_stream_iio_channel_reset_tlast_modes),
+	.get = mw_stream_iio_channel_get_reset_tlast_mode,
+	.set = mw_stream_iio_channel_set_reset_tlast_mode,
+};
+
+/*************
+ * TLAST Modes
+ *************/
+static const char * const mw_stream_iio_channel_tlast_modes[] = { "auto", "user_logic" };
+
+static int mw_stream_iio_channel_get_tlast_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	return mwchan->tlast_mode;
+}
+
+static int mw_stream_iio_channel_set_tlast_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int mode)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	mwchan->tlast_mode = mode;
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+static const struct iio_enum mw_stream_iio_channel_tlast_mode_enum = {
+	.items = mw_stream_iio_channel_tlast_modes,
+	.num_items = ARRAY_SIZE(mw_stream_iio_channel_tlast_modes),
+	.get = mw_stream_iio_channel_get_tlast_mode,
+	.set = mw_stream_iio_channel_set_tlast_mode,
+};
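+
+/*
+ * Illustrative sysfs usage (paths assumed; the exact attribute prefix
+ * depends on how the IIO core names the channel):
+ *
+ *   cat /sys/bus/iio/devices/iio:deviceX/*tlast_mode_available
+ *       auto user_logic
+ *   echo user_logic > /sys/bus/iio/devices/iio:deviceX/*tlast_mode
+ *
+ * The same pattern applies to reset_tlast_mode and reset_ip_mode.
+ */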
+
+static const struct iio_chan_spec_ext_info mw_stream_iio_ch_tlast_info[] = {
+	MW_STREAM_IIO_ENUM("tlast_mode", IIO_SHARED_BY_ALL, &mw_stream_iio_channel_tlast_mode_enum),
+	MW_STREAM_IIO_ENUM_AVAILABLE("tlast_mode", IIO_SHARED_BY_ALL, &mw_stream_iio_channel_tlast_mode_enum),
+	MW_STREAM_IIO_ENUM("reset_tlast_mode", IIO_SHARED_BY_ALL, &mw_stream_iio_channel_reset_tlast_mode_enum),
+	MW_STREAM_IIO_ENUM_AVAILABLE("reset_tlast_mode", IIO_SHARED_BY_ALL, &mw_stream_iio_channel_reset_tlast_mode_enum),
+	{ },
+};
+
+static const struct iio_chan_spec_ext_info mw_stream_iio_ch_ip_info[] = {
+	MW_STREAM_IIO_ENUM("reset_ip_mode", IIO_SHARED_BY_ALL, &mw_stream_iio_channel_reset_ip_mode_enum),
+	MW_STREAM_IIO_ENUM_AVAILABLE("reset_ip_mode", IIO_SHARED_BY_ALL, &mw_stream_iio_channel_reset_ip_mode_enum),
+	{ },
+};
+
+static int mw_stream_iio_channel_reg_access(struct iio_dev *indio_dev,
+	unsigned reg, unsigned writeval, unsigned *readval)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	if (readval == NULL)
+		mw_ip_write32(mwchan->mwdev->mw_ip_info, reg & 0xFFFF, writeval);
+	else
+		*readval = mw_ip_read32(mwchan->mwdev->mw_ip_info, reg & 0xFFFF);
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+static const struct iio_info mw_stream_iio_dev_info = {
+	.debugfs_reg_access = &mw_stream_iio_channel_reg_access,
+};
+
+static int devm_mw_stream_configure_buffer(struct iio_dev *indio_dev)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+	struct iio_buffer *buffer;
+	int status;
+
+	buffer = iio_dmaengine_buffer_alloc(indio_dev->dev.parent, mwchan->dmaname,
+			&mw_stream_iio_buffer_dma_buffer_ops, indio_dev);
+	if (IS_ERR(buffer)) {
+		if (PTR_ERR(buffer) == -EPROBE_DEFER)
+			dev_info(&indio_dev->dev, "Deferring probe for DMA engine driver load\n");
+		else
+			dev_err(&indio_dev->dev, "Failed to allocate IIO DMA buffer: %ld\n", PTR_ERR(buffer));
+		return PTR_ERR(buffer);
+	}
+
+	status = devm_add_action(indio_dev->dev.parent, (devm_action_fn)iio_dmaengine_buffer_free, buffer);
+	if (status) {
+		iio_dmaengine_buffer_free(buffer);
+		return status;
+	}
+
+	iio_device_attach_buffer(indio_dev, buffer);
+
+	indio_dev->modes = INDIO_BUFFER_HARDWARE;
+	indio_dev->direction = mwchan->iio_direction;
+	indio_dev->setup_ops = &mw_stream_iio_buffer_setup_ops;
+
+	return 0;
+}
+
+static int mw_stream_setup_ip_channel(struct iio_dev *indio_dev, struct iio_chan_spec *channel)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	channel->type = IIO_GENERIC_DATA;
+	channel->indexed = 1;
+	channel->extend_name = devm_kstrdup(&mwchan->dev, "ip_info", GFP_KERNEL);
+	if (!channel->extend_name)
+		return -ENOMEM;
+	channel->ext_info = mw_stream_iio_ch_ip_info;
+	channel->scan_index = -ENODEV;
+
+	return 0;
+}
+
+static int mw_stream_setup_tlast_channel(struct iio_dev *indio_dev, struct iio_chan_spec *channel)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+
+	channel->type = IIO_GENERIC_DATA;
+	channel->indexed = 1;
+	channel->extend_name = devm_kstrdup(&mwchan->dev, "tlast_count", GFP_KERNEL);
+	if (!channel->extend_name)
+		return -ENOMEM;
+	channel->ext_info = mw_stream_iio_ch_tlast_info;
+	channel->scan_index = -ENODEV;
+
+	return 0;
+}
+
+static const char mw_stream_iio_data_channel_compat[] = "mathworks,iio-data-channel-v1.00";
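+
+/*
+ * Illustrative data-channel node (property values assumed) matched by the
+ * compatible string above:
+ *
+ *   channel@0 {
+ *       compatible = "mathworks,iio-data-channel-v1.00";
+ *       reg = <0>;
+ *       mathworks,data-format = "s32/16>>0";
+ *       mathworks,chan-name = "data0"; // optional
+ *   };
+ *
+ * Note that mw_stream_setup_scan_type() below parses the first width as
+ * storagebits and the second as realbits.
+ */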
+
+static int mw_stream_count_data_channels(struct iio_dev *indio_dev)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+	struct device_node *data_node;
+	int count = 0;
+
+	for_each_child_of_node(mwchan->dev.of_node, data_node) {
+		if (of_device_is_compatible(data_node, mw_stream_iio_data_channel_compat))
+			count++;
+	}
+
+	return count;
+}
+
+static int mw_stream_setup_scan_type(struct iio_dev *indio_dev, struct device_node *node, struct iio_chan_spec *channel)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+	int status;
+	unsigned int storagebits, realbits, shift;
+	char sign;
+	const char *fmt;
+
+	status = of_property_read_string(node, "mathworks,data-format", &fmt);
+	if (status) {
+		dev_err(&mwchan->dev, "Missing data-format specifier for %s\n", node->name);
+		return status;
+	}
+	/* e.g. "s32/16>>0": sign, storagebits, realbits, shift */
+	status = sscanf(fmt, "%c%u/%u>>%u", &sign, &storagebits, &realbits, &shift);
+	if (status != 4) {
+		dev_err(&mwchan->dev, "Invalid data-format specifier for %s\n", node->name);
+		return -EINVAL;
+	}
+	channel->scan_type.sign = sign;
+	channel->scan_type.storagebits = storagebits;
+	channel->scan_type.realbits = realbits;
+	channel->scan_type.shift = shift;
+
+	return 0;
+}
+
+static int mw_stream_setup_data_channels(struct iio_dev *indio_dev)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+	struct iio_chan_spec *channel;
+	struct device_node *data_node;
+	int status;
+	u32 scan_index = 0;
+	unsigned long *available_scan_masks;
+
+	for_each_child_of_node(mwchan->dev.of_node, data_node) {
+		if (!of_device_is_compatible(data_node, mw_stream_iio_data_channel_compat))
+			continue;
+		status = of_property_read_u32(data_node, "reg", &scan_index);
+		if (status) {
+			dev_err(&mwchan->dev, "Missing 'reg' property in node %s\n", data_node->name);
+			return -EINVAL;
+		}
+		if (scan_index >= mwchan->num_data_chan) {
+			dev_err(&mwchan->dev, "Invalid 'reg' property in node %s: %u\n", data_node->name, scan_index);
+			return -EINVAL;
+		}
+		channel = (struct iio_chan_spec *)&indio_dev->channels[scan_index];
+		if (channel->indexed == 1) {
+			dev_err(&mwchan->dev, "Duplicate 'reg' property in node %s: %u\n", data_node->name, scan_index);
+			return -EINVAL;
+		}
+		channel->indexed = 1;
+		channel->type = IIO_GENERIC_DATA;
+		if (mwchan->iio_direction == IIO_DEVICE_DIRECTION_OUT)
+			channel->output = 1;
+		channel->channel = scan_index;
+		channel->scan_index = scan_index;
+		status = of_property_read_string(data_node, "mathworks,chan-name", &channel->extend_name);
+		if (status)
+			channel->extend_name = NULL;
+		status = mw_stream_setup_scan_type(indio_dev, data_node, channel);
+		if (status)
+			return status;
+	}
+
+	/* Only allow all channels or no channels */
+	available_scan_masks = devm_kzalloc(&mwchan->dev, sizeof(unsigned long) * 2, GFP_KERNEL);
+	if (!available_scan_masks)
+		return -ENOMEM;
+	available_scan_masks[0] = (1UL << mwchan->num_data_chan) - 1;
+	indio_dev->available_scan_masks = available_scan_masks;
+
+	return 0;
+}
+
+static void mw_stream_iio_unregister(void *opaque)
+{
+	struct device *dev = opaque;
+
+	/* Unregister the IIO device */
+	devres_release_group(dev, mw_stream_iio_unregister);
+}
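+
+/*
+ * Registration below uses a named devres group: every resource taken while
+ * registering the IIO device is opened under the mw_stream_iio_unregister
+ * key, and the parent device gets a devm action that releases the whole
+ * group, so removing the parent tears the IIO device down in one step.
+ */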
+static int devm_mw_stream_iio_register(struct iio_dev *indio_dev)
+{
+	struct mw_stream_iio_chandev *mwchan = iio_priv(indio_dev);
+	int status;
+	int chIdx = 0;
+
+	if (!devres_open_group(&mwchan->dev, mw_stream_iio_unregister, GFP_KERNEL))
+		return -ENOMEM;
+
+	indio_dev->dev.parent = &mwchan->dev;
+	indio_dev->name = dev_name(&mwchan->dev);
+	indio_dev->info = &mw_stream_iio_dev_info;
+
+	mwchan->num_data_chan = mw_stream_count_data_channels(indio_dev);
+
+	indio_dev->num_channels = mwchan->num_data_chan;
+	indio_dev->num_channels++; /* info channel */
+	if (mwchan->tlast_cntr_addr != -EINVAL)
+		indio_dev->num_channels++; /* tlast_count channel */
+
+	indio_dev->channels = devm_kcalloc(&mwchan->dev, indio_dev->num_channels,
+			sizeof(struct iio_chan_spec), GFP_KERNEL);
+	if (!indio_dev->channels)
+		return -ENOMEM;
+
+	status = mw_stream_setup_data_channels(indio_dev);
+	if (status)
+		return status;
+	chIdx += mwchan->num_data_chan;
+
+	status = mw_stream_setup_ip_channel(indio_dev, (struct iio_chan_spec *)&indio_dev->channels[chIdx++]);
+	if (status)
+		return status;
+
+	if (mwchan->tlast_cntr_addr != -EINVAL) {
+		status = mw_stream_setup_tlast_channel(indio_dev, (struct iio_chan_spec *)&indio_dev->channels[chIdx++]);
+		if (status)
+			return status;
+	}
+
+	status = devm_mw_stream_configure_buffer(indio_dev);
+	if (status)
+		return status;
+
+	status = devm_iio_device_register(&mwchan->dev, indio_dev);
+	if (status)
+		return status;
+
+	devres_close_group(&mwchan->dev, mw_stream_iio_unregister);
+
+	/* Set up the parent device to tear us down on removal */
+	status = devm_add_action(mwchan->dev.parent, mw_stream_iio_unregister, &mwchan->dev);
+	if (status) {
+		mw_stream_iio_unregister(&mwchan->dev);
+		return status;
+	}
+
+	return 0;
+}
+
+/* Nothing to actually do upon release */
+static void mw_stream_iio_channel_release(struct device *dev)
+{
+}
+
+static struct iio_dev *devm_mw_stream_iio_alloc(
+		struct mathworks_ipcore_dev *mwdev,
+		struct device_node *node,
+		struct mw_stream_iio_channel_info *info)
+{
+	struct iio_dev *indio_dev;
+	struct mw_stream_iio_chandev *mwchan;
+	const char *devname;
+	int status;
+
+	if (!devres_open_group(IP2DEVP(mwdev), devm_mw_stream_iio_alloc, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+
+	indio_dev = devm_iio_device_alloc(IP2DEVP(mwdev), sizeof(struct mw_stream_iio_chandev));
+	if (!indio_dev) {
+		dev_err(IP2DEVP(mwdev), "Failed to allocate memory for channel %s\n", node->name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mwchan = iio_priv(indio_dev);
+	mwchan->mwdev = mwdev;
+	mwchan->iio_direction = info->iio_direction;
+
+	/* Find the name of the DMA channel; there should be only one per node */
+	status = of_property_read_string_index(node, "dma-names", 0, &mwchan->dmaname);
+	if (status) {
+		dev_err(IP2DEVP(mwdev), "Missing dma-names property for node: %s\n", node->name);
+		return ERR_PTR(status);
+	}
+
+	if (mwchan->iio_direction == IIO_DEVICE_DIRECTION_IN) {
+		u32 reg;
+
+		status = of_property_read_u32(node, "mathworks,sample-cnt-reg", &reg);
+		mwchan->tlast_cntr_addr = status ? -EINVAL : reg;
+	} else {
+		mwchan->tlast_cntr_addr = -EINVAL;
+	}
+
+	device_initialize(&mwchan->dev);
+
+	mwchan->dev.parent = IP2DEVP(mwdev);
+	mwchan->dev.of_node = node;
+	status = ida_simple_get(&mw_stream_iio_channel_ida, 0, 0, GFP_KERNEL);
+	if (status < 0)
+		return ERR_PTR(status);
+	mwchan->dev.id = status;
+	status = devm_add_action(IP2DEVP(mwdev), mw_stream_iio_chan_ida_remove, mwchan);
+	if (status) {
+		mw_stream_iio_chan_ida_remove(mwchan);
+		return ERR_PTR(status);
+	}
+	mwchan->dev.release = mw_stream_iio_channel_release;
+	/* clone the parent's DMA config */
+	memcpy(&mwchan->dev.archdata, &IP2DEVP(mwdev)->archdata, sizeof(struct dev_archdata));
+	mwchan->dev.coherent_dma_mask = IP2DEVP(mwdev)->coherent_dma_mask;
+	mwchan->dev.dma_mask = IP2DEVP(mwdev)->dma_mask;
+	mwchan->dev.dma_range_map = IP2DEVP(mwdev)->dma_range_map;
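+
+	/*
+	 * Device naming: prefer the optional "mathworks,dev-name" property;
+	 * otherwise fall back to the node name plus the IDA-assigned
+	 * instance id.
+	 */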
"mathworks,dev-name", &devname); + if (!status) { + /* Use the specified channel name */ + status = dev_set_name(&mwchan->dev, "%s:%s", dev_name(mwchan->mwdev->mw_ip_info->char_device), devname); + } else { + /* Use the node name + dev ID */ + status = dev_set_name(&mwchan->dev, "%s:%s%d", dev_name(mwchan->mwdev->mw_ip_info->char_device), node->name, mwchan->dev.id); + } + if (status) + return ERR_PTR(status); + + status = device_add(&mwchan->dev); + if (status) + return ERR_PTR(status); + + status = devm_add_action(IP2DEVP(mwdev), (devm_action_fn)device_unregister, &mwchan->dev); + if (status) { + device_unregister(&mwchan->dev); + return ERR_PTR(status); + } + + devres_close_group(IP2DEVP(mwdev), devm_mw_stream_iio_alloc); + + return indio_dev; +} + +static int mw_stream_iio_channel_probe( + struct mathworks_ipcore_dev *mwdev, + struct device_node *node, + struct mw_stream_iio_channel_info *info) +{ + int status; + struct iio_dev *indio_dev; + + indio_dev = devm_mw_stream_iio_alloc(mwdev, node, info); + if (IS_ERR(indio_dev)) + return PTR_ERR(indio_dev); + + status = devm_mw_stream_iio_register(indio_dev); + if (status) + return status; + + return 0; +} + +static struct mw_stream_iio_channel_info mw_stream_iio_mm2s_info = { + .iio_direction = IIO_DEVICE_DIRECTION_OUT, +}; + +static struct mw_stream_iio_channel_info mw_stream_iio_s2mm_info = { + .iio_direction = IIO_DEVICE_DIRECTION_IN, +}; + +static const struct of_device_id mw_stream_iio_channel_of_match[] = { + { .compatible = "mathworks,axi4stream-mm2s-channel-v1.00", .data = &mw_stream_iio_mm2s_info}, + { .compatible = "mathworks,axi4stream-s2mm-channel-v1.00", .data = &mw_stream_iio_s2mm_info}, + {}, +}; + +int mw_stream_iio_channels_probe(struct mathworks_ipcore_dev *mwdev) +{ + int status; + + struct device_node *child; + const struct of_device_id *match; + + + for_each_child_of_node(IP2DEVP(mwdev)->of_node,child) { + match = of_match_node(mw_stream_iio_channel_of_match, child); + if(match){ + status = mw_stream_iio_channel_probe(mwdev, child, (struct mw_stream_iio_channel_info *)match->data); + if(status) + return status; + } + } + + return 0; +} + +EXPORT_SYMBOL_GPL(mw_stream_iio_channels_probe); + +static int __init mw_stream_iio_channel_init(void) +{ + return 0; +} + +static void __exit mw_stream_iio_channel_exit(void) +{ + +} + +module_init(mw_stream_iio_channel_init); +module_exit(mw_stream_iio_channel_exit); + +MODULE_AUTHOR("MathWorks, Inc"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MathWorks Streaming IIO Channel"); +MODULE_ALIAS(DRIVER_NAME); diff --git a/drivers/misc/mathworks/mw_stream_iio_channel.h b/drivers/misc/mathworks/mw_stream_iio_channel.h new file mode 100755 index 0000000000000..f05e452cda084 --- /dev/null +++ b/drivers/misc/mathworks/mw_stream_iio_channel.h @@ -0,0 +1,25 @@ +/* + * MathWorks Streaming Channel + * + * Copyright 2016 The MathWorks, Inc + * + * Licensed under the GPL-2. 
diff --git a/drivers/misc/mathworks/mw_stream_iio_channel.h b/drivers/misc/mathworks/mw_stream_iio_channel.h
new file mode 100755
index 0000000000000..f05e452cda084
--- /dev/null
+++ b/drivers/misc/mathworks/mw_stream_iio_channel.h
@@ -0,0 +1,25 @@
+/*
+ * MathWorks Streaming Channel
+ *
+ * Copyright 2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef _MW_STREAM_IIO_CHANNEL_H_
+#define _MW_STREAM_IIO_CHANNEL_H_
+
+#include "mathworks_ipcore.h"
+
+/*********************************************************
+ * API functions
+ *********************************************************/
+#if defined(CONFIG_MWIPCORE_IIO_STREAMING) || defined(CONFIG_MWIPCORE_IIO_STREAMING_MODULE)
+extern int mw_stream_iio_channels_probe(struct mathworks_ipcore_dev *mwdev);
+#else
+static inline int mw_stream_iio_channels_probe(struct mathworks_ipcore_dev *mwdev)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif /* _MW_STREAM_IIO_CHANNEL_H_ */
diff --git a/drivers/misc/mathworks/mwadma_ioctl.h b/drivers/misc/mathworks/mwadma_ioctl.h
new file mode 100755
index 0000000000000..2772437afbdb1
--- /dev/null
+++ b/drivers/misc/mathworks/mwadma_ioctl.h
@@ -0,0 +1,78 @@
+/*
+ * MathWorks AXI DMA IOCTLs
+ *
+ * Copyright 2013-2016 The MathWorks, Inc
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef _MWADMA_IOCTL_H_
+#define _MWADMA_IOCTL_H_
+
+#define MAX_DMASIZE (2 * 32 * 1024 * 1024) /* 32 MB per channel, two channels */
+
+struct mw_axidma_params {
+	char		*virt;
+	dma_addr_t	phys;
+	size_t		offset;
+	size_t		counter;
+	size_t		size;
+	size_t		bytes_per_ring;
+	size_t		desc_length;
+	size_t		total_rings;
+};
+
+enum SIGNAL_TRANSFER {
+	SIGNAL_TRANSFER_COMPLETE = 1,
+	SIGNAL_OFF,
+	SIGNAL_DATAFLOW,
+	SIGNAL_BURST_COMPLETE,
+	MAX_SIGNAL_TRANSFER
+};
+
+enum TX_QUEUE_ERROR {
+	TX_ERROR_QUNDERFLOW = 0,
+	TX_ERROR_QLOW,
+	TX_ERROR_QPRIME,
+	TX_ERROR_QFULL,
+	TX_ERROR_QOVERFLOW,
+	MAX_TX_QUEUE_ERROR
+};
+
+enum TX_WATERMARK {
+	TX_WATERMARK_QUNDERFLOW = 0,
+	TX_WATERMARK_QLOW,
+	TX_WATERMARK_QPRIME,
+	TX_WATERMARK_QFULL = 20,
+	TX_WATERMARK_QOVERFLOW = 256,
+	MAX_TX_WATERMARK
+};
+
+#define MWADMA_MAGIC		'Q'
+/* Common requests */
+#define MWADMA_IOC_RESET	_IO(MWADMA_MAGIC, 0)
+#define MWADMA_GET_PROPERTIES	_IO(MWADMA_MAGIC, 1)
+#define MWADMA_TEST_LOOPBACK	_IO(MWADMA_MAGIC, 18)
+
+/* RX channel requests */
+#define MWADMA_SETUP_RX_CHANNEL		_IO(MWADMA_MAGIC, 2)
+#define MWADMA_RX_SINGLE		_IO(MWADMA_MAGIC, 4)
+#define MWADMA_RX_BURST			_IO(MWADMA_MAGIC, 6)
+#define MWADMA_RX_CONTINUOUS		_IO(MWADMA_MAGIC, 8)
+#define MWADMA_RX_GET_NEXT_INDEX	_IO(MWADMA_MAGIC, 10)
+#define MWADMA_RX_STOP			_IO(MWADMA_MAGIC, 12)
+#define MWADMA_RX_GET_ERROR		_IO(MWADMA_MAGIC, 14)
+#define MWADMA_FREE_RX_CHANNEL		_IO(MWADMA_MAGIC, 16)
+
+/* TX channel requests */
+#define MWADMA_SETUP_TX_CHANNEL		_IO(MWADMA_MAGIC, 3)
+#define MWADMA_TX_ENQUEUE		_IO(MWADMA_MAGIC, 5)
+#define MWADMA_TX_SINGLE		_IO(MWADMA_MAGIC, 7)
+#define MWADMA_TX_BURST			_IO(MWADMA_MAGIC, 9)
+#define MWADMA_TX_CONTINUOUS		_IO(MWADMA_MAGIC, 11)
+#define MWADMA_TX_GET_ERROR		_IO(MWADMA_MAGIC, 13)
+#define MWADMA_TX_STOP			_IO(MWADMA_MAGIC, 15)
+#define MWADMA_TX_GET_NEXT_INDEX	_IO(MWADMA_MAGIC, 17)
+#define MWADMA_FREE_TX_CHANNEL		_IO(MWADMA_MAGIC, 19)
+
+#endif /* _MWADMA_IOCTL_H_ */
+
+/*EOF*/
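+
+/*
+ * Illustrative userspace usage (sketch only; the device node name and the
+ * argument conventions of the individual requests are assumptions, not
+ * part of this header):
+ *
+ *   int fd = open("/dev/mwipcore0", O_RDWR);
+ *   struct mw_axidma_params params = {0};
+ *
+ *   if (ioctl(fd, MWADMA_GET_PROPERTIES, &params) == 0)
+ *       printf("ring buffer size: %zu\n", params.size);
+ */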