37 #include <linux/module.h>
38 #include <linux/vmalloc.h>
54 static int eccdev_open(
struct inode *,
struct file *);
56 static long eccdev_ioctl(
struct file *,
unsigned int,
unsigned long);
57 static int eccdev_mmap(
struct file *,
struct vm_area_struct *);
62 #define PAGE_FAULT_VERSION KERNEL_VERSION(2, 6, 23)
64 #if LINUX_VERSION_CODE >= PAGE_FAULT_VERSION
67 static struct page *eccdev_vma_nopage(
68 struct vm_area_struct *,
unsigned long,
int *);
86 #if LINUX_VERSION_CODE >= PAGE_FAULT_VERSION
89 .nopage = eccdev_vma_nopage
99 ec_ioctl_context_t
ctx;
119 cdev->
cdev.owner = THIS_MODULE;
121 ret = cdev_add(&cdev->
cdev,
122 MKDEV(MAJOR(dev_num), master->
index), 1);
136 cdev_del(&cdev->
cdev);
153 "Failed to allocate memory for private data structure.\n");
158 priv->
ctx.writable = (filp->f_mode & FMODE_WRITE) != 0;
159 priv->
ctx.requested = 0;
160 priv->
ctx.process_data = NULL;
161 priv->
ctx.process_data_size = 0;
163 filp->private_data = priv;
180 if (priv->
ctx.requested) {
184 if (priv->
ctx.process_data) {
185 vfree(priv->
ctx.process_data);
200 long eccdev_ioctl(
struct file *filp,
unsigned int cmd,
unsigned long arg)
206 "ioctl(filp = 0x%p, cmd = 0x%08x (0x%02x), arg = 0x%lx)\n",
207 filp, cmd, _IOC_NR(cmd), arg);
210 return ec_ioctl(priv->
cdev->
master, &priv->
ctx, cmd, (
void __user *) arg);
218 #define VM_DONTDUMP VM_RESERVED
230 struct vm_area_struct *vma
239 vma->vm_private_data = priv;
246 #if LINUX_VERSION_CODE >= PAGE_FAULT_VERSION
256 struct vm_area_struct *vma,
260 unsigned long offset = vmf->pgoff << PAGE_SHIFT;
264 if (offset >= priv->
ctx.process_data_size) {
265 return VM_FAULT_SIGBUS;
268 page = vmalloc_to_page(priv->
ctx.process_data + offset);
270 return VM_FAULT_SIGBUS;
277 " offset = %lu, page = %p\n", vmf->virtual_address, offset, page);
289 struct page *eccdev_vma_nopage(
290 struct vm_area_struct *vma,
292 unsigned long address,
296 unsigned long offset;
297 struct page *page = NOPAGE_SIGBUS;
301 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
303 if (offset >= priv->
ctx.process_data_size)
304 return NOPAGE_SIGBUS;
306 page = vmalloc_to_page(priv->
ctx.process_data + offset);
309 " offset = %#lx, page = %p\n", address, offset, page);
313 *type = VM_FAULT_MINOR;
void ecrt_release_master(ec_master_t *master)
Releases a requested EtherCAT master.
ec_master_t * master
Master owning the device.
#define VM_DONTDUMP
VM_RESERVED disappeared in 3.7.
Private data structure for file handles.
struct cdev cdev
Character device.
ec_cdev_t * cdev
Character device.
EtherCAT master structure.
#define EC_MASTER_DBG(master, level, fmt, args...)
Convenience macro for printing master-specific debug messages to syslog.
Ethernet over EtherCAT (EoE)
static int eccdev_mmap(struct file *, struct vm_area_struct *)
Memory-map callback for the EtherCAT character device.
struct vm_operations_struct eccdev_vm_ops
Callbacks for a virtual memory area retrieved with eccdev_mmap().
static int eccdev_vma_fault(struct vm_area_struct *, struct vm_fault *)
Page fault callback for a virtual memory area.
#define EC_MASTER_ERR(master, fmt, args...)
Convenience macro for printing master-specific errors to syslog.
static long eccdev_ioctl(struct file *, unsigned int, unsigned long)
Called when an ioctl() command is issued.
ec_ioctl_context_t ctx
Context.
static int eccdev_open(struct inode *, struct file *)
Called when the cdev is opened.
Vendor specific over EtherCAT protocol handler.
static int eccdev_release(struct inode *, struct file *)
Called when the cdev is closed.
void ec_cdev_clear(ec_cdev_t *cdev)
Destructor.
#define DEBUG
Set to 1 to enable device operations debugging.
EtherCAT master character device.
EtherCAT master character device IOCTL commands.
EtherCAT slave configuration structure.
EtherCAT master character device.
static struct file_operations eccdev_fops
File operation callbacks for the EtherCAT character device.
int ec_cdev_init(ec_cdev_t *cdev, ec_master_t *master, dev_t dev_num)
Constructor.