@@ -17,13 +17,13 @@
#ifdef CONFIG_USER_EVENTS
struct user_event_mm {
- struct list_head link;
- struct list_head enablers;
- struct mm_struct *mm;
- struct user_event_mm *next;
- refcount_t refcnt;
- refcount_t tasks;
- struct rcu_work put_rwork;
+ struct list_head	link;
+ struct list_head	enablers;
+ struct mm_struct	*mm;
+ struct user_event_mm	*next;
+ refcount_t		refcnt;
+ refcount_t		tasks;
+ struct rcu_work		put_rwork;
};
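
For context on how these fields work together: refcnt pins the user_event_mm itself, tasks counts the tasks still attached to the mm, and put_rwork lets the final free wait out an RCU grace period. A minimal sketch of that deferred-put pattern, assuming hypothetical helper names (the kernel's own functions may differ):

/* Illustrative sketch only: defer the final free of a user_event_mm
 * until after an RCU grace period, using the put_rwork member above.
 * delayed_destroy_sketch()/mm_put_sketch() are hypothetical names.
 */
static void delayed_destroy_sketch(struct work_struct *work)
{
	struct user_event_mm *mm =
		container_of(to_rcu_work(work), struct user_event_mm, put_rwork);

	/* Grace period has elapsed; no RCU readers can still see this mm. */
	kfree(mm);
}

static void mm_put_sketch(struct user_event_mm *mm)
{
	if (mm && refcount_dec_and_test(&mm->refcnt)) {
		INIT_RCU_WORK(&mm->put_rwork, delayed_destroy_sketch);
		queue_rcu_work(system_wq, &mm->put_rwork);
	}
}
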
extern void user_event_mm_dup(struct task_struct *t,
@@ -25,25 +25,25 @@
struct user_reg {
/* Input: Size of the user_reg structure being used */
- __u32 size;
+ __u32	size;
/* Input: Bit in enable address to use */
- __u8 enable_bit;
+ __u8	enable_bit;
/* Input: Enable size in bytes at address */
- __u8 enable_size;
+ __u8	enable_size;
/* Input: Flags for future use, set to 0 */
- __u16 flags;
+ __u16	flags;
/* Input: Address to update when enabled */
- __u64 enable_addr;
+ __u64	enable_addr;
/* Input: Pointer to string with event name, description and flags */
- __u64 name_args;
+ __u64	name_args;
/* Output: Index of the event to use when writing data */
- __u32 write_index;
+ __u32	write_index;
} __attribute__((__packed__));
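
As a usage sketch of the structure above: user space fills a user_reg, points enable_addr at a local variable, and registers it through the user_events_data file with the DIAG_IOCSREG ioctl from this header. The path, the event name "test u32 count", and the `enabled` variable below are illustrative assumptions (tracefs may also be mounted under /sys/kernel/debug/tracing):

/* Sketch: register a user_event and hand the kernel an enable address. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/user_events.h>

static int enabled;	/* kernel sets/clears bit 31 here when tracing toggles */

int main(void)
{
	struct user_reg reg;
	int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);

	if (data_fd < 0)
		return 1;

	memset(&reg, 0, sizeof(reg));
	reg.size = sizeof(reg);
	reg.enable_bit = 31;
	reg.enable_size = sizeof(enabled);
	reg.enable_addr = (__u64)&enabled;
	reg.name_args = (__u64)"test u32 count";

	if (ioctl(data_fd, DIAG_IOCSREG, &reg) < 0)
		return 1;

	printf("write_index: %u\n", reg.write_index);
	close(data_fd);
	return 0;
}

Once registered, user space can test bit 31 of enabled before bothering to write, which is the point of the enable_addr/enable_bit pair.
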
/*
@@ -52,19 +52,19 @@ struct user_reg {
*/
struct user_unreg {
/* Input: Size of the user_unreg structure being used */
- __u32 size;
+ __u32	size;
/* Input: Bit to unregister */
- __u8 disable_bit;
+ __u8	disable_bit;
/* Input: Reserved, set to 0 */
- __u8 __reserved;
+ __u8	__reserved;
/* Input: Reserved, set to 0 */
- __u16 __reserved2;
+ __u16	__reserved2;
/* Input: Address to unregister */
- __u64 disable_addr;
+ __u64	disable_addr;
} __attribute__((__packed__));
#define DIAG_IOC_MAGIC '*'
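
The matching teardown unregisters the enable address with DIAG_IOCSUNREG. In this sketch data_fd, bit 31, and the enable address are the hypothetical values carried over from the registration sketch above (headers as in that sketch):

/* Sketch: remove a previously registered enable address. */
static int unregister_sketch(int data_fd, int *enable_addr)
{
	struct user_unreg unreg;

	memset(&unreg, 0, sizeof(unreg));
	unreg.size = sizeof(unreg);
	unreg.disable_bit = 31;		/* must match the registered enable_bit */
	unreg.disable_addr = (__u64)enable_addr;

	return ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
}
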
@@ -53,9 +53,9 @@
* allows isolation for events by various means.
*/
struct user_event_group {
- char *system_name;
- struct hlist_node node;
- struct mutex reg_mutex;
+ char		*system_name;
+ struct hlist_node	node;
+ struct mutex	reg_mutex;
DECLARE_HASHTABLE(register_table, 8);
};
@@ -76,17 +76,17 @@ static unsigned int current_user_events;
* refcnt reaches one.
*/
struct user_event {
- struct user_event_group *group;
- struct tracepoint tracepoint;
- struct trace_event_call call;
- struct trace_event_class class;
- struct dyn_event devent;
- struct hlist_node node;
- struct list_head fields;
- struct list_head validators;
- refcount_t refcnt;
- int min_size;
- char status;
+ struct user_event_group		*group;
+ struct tracepoint		tracepoint;
+ struct trace_event_call		call;
+ struct trace_event_class	class;
+ struct dyn_event		devent;
+ struct hlist_node		node;
+ struct list_head		fields;
+ struct list_head		validators;
+ refcount_t			refcnt;
+ int				min_size;
+ char				status;
};
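
Together with struct user_event_group above, each event is hashed into its group's register_table via node and pinned through refcnt while in use. A rough sketch of a name-keyed lookup over that arrangement, assuming a hypothetical helper name and a jhash-based key (the kernel's internal lookup and key function may differ):

/* Illustrative sketch only: find an event in a group's register_table
 * and take a reference on it.
 */
static struct user_event *find_user_event_sketch(struct user_event_group *group,
						 const char *name)
{
	struct user_event *user;
	u32 key = jhash(name, strlen(name), 0);

	lockdep_assert_held(&group->reg_mutex);

	hash_for_each_possible(group->register_table, user, node, key) {
		if (strcmp(user->tracepoint.name, name))
			continue;

		if (refcount_inc_not_zero(&user->refcnt))
			return user;
	}

	return NULL;
}
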
/*
@@ -95,12 +95,12 @@ struct user_event {
* these to track enablement sites that are tied to an event.
*/
struct user_event_enabler {
- struct list_head link;
- struct user_event *event;
- unsigned long addr;
+ struct list_head	link;
+ struct user_event	*event;
+ unsigned long		addr;
/* Track enable bit, flags, etc. Aligned for bitops. */
- unsigned int values;
+ unsigned int		values;
};
/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
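
Since values packs several things into one word, the low six bits carry the bit number to flip at addr. A tiny sketch of decoding it; the mask and helper names are made up here, while the kernel's own defines follow this comment in the real source:

/* Illustrative sketch only: decode the enable bit (0-63) from values. */
#define ENABLER_BIT_MASK_SKETCH	0x3F	/* bits 0-5 */

static inline int enabler_bit_sketch(struct user_event_enabler *enabler)
{
	return (int)(enabler->values & ENABLER_BIT_MASK_SKETCH);
}
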
@@ -119,9 +119,9 @@ struct user_event_enabler {
/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
- struct work_struct work;
- struct user_event_mm *mm;
- struct user_event_enabler *enabler;
+ struct work_struct		work;
+ struct user_event_mm		*mm;
+ struct user_event_enabler	*enabler;
};
static struct kmem_cache *fault_cache;
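
When writing the enable bit would fault because the page is not resident, the update is deferred: a record is taken from fault_cache and the fault-in happens from a workqueue. A condensed sketch of that pattern with hypothetical function names; the real code also takes references on the mm and enabler and retries the bit update:

/* Illustrative sketch only: defer a user-page fault-in to a workqueue. */
static void fault_work_sketch(struct work_struct *work)
{
	struct user_event_enabler_fault *fault =
		container_of(work, struct user_event_enabler_fault, work);

	/* ... fault in the page, retry the enable-bit update ... */

	kmem_cache_free(fault_cache, fault);
}

static bool defer_fault_sketch(struct user_event_mm *mm,
			       struct user_event_enabler *enabler)
{
	struct user_event_enabler_fault *fault;

	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!fault)
		return false;

	INIT_WORK(&fault->work, fault_work_sketch);
	fault->mm = mm;
	fault->enabler = enabler;

	return schedule_work(&fault->work);
}
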
@@ -137,23 +137,23 @@ static DEFINE_SPINLOCK(user_event_mms_lock);
* These are not shared and only accessible by the file that created it.
*/
struct user_event_refs {
- struct rcu_head rcu;
- int count;
- struct user_event *events[];
+ struct rcu_head		rcu;
+ int			count;
+ struct user_event	*events[];
};
struct user_event_file_info {
- struct user_event_group *group;
- struct user_event_refs *refs;
+ struct user_event_group	*group;
+ struct user_event_refs	*refs;
};
#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)
struct user_event_validator {
- struct list_head link;
- int offset;
- int flags;
+ struct list_head	link;
+ int			offset;
+ int			flags;
};
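
user_event_refs is an immutable snapshot: adding a reference for a file means building a larger array, publishing it, and letting RCU reclaim the old one after readers drain. A condensed sketch under that assumption, with a hypothetical helper name and locking/error paths trimmed:

/* Illustrative sketch only: grow a file's refs array and publish it.
 * Assumes the group's reg_mutex is held so info->refs cannot change
 * underneath us.
 */
static int refs_add_sketch(struct user_event_file_info *info,
			   struct user_event *user)
{
	struct user_event_refs *refs = info->refs, *new_refs;
	int count = refs ? refs->count : 0;

	new_refs = kzalloc(struct_size(new_refs, events, count + 1), GFP_KERNEL);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	if (refs)
		memcpy(new_refs->events, refs->events,
		       count * sizeof(struct user_event *));

	new_refs->events[count] = user;

	rcu_assign_pointer(info->refs, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return 0;
}
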
typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
@@ -2276,11 +2276,11 @@ static int user_events_release(struct inode *node, struct file *file)
}
static const struct file_operations user_data_fops = {
- .open = user_events_open,
- .write = user_events_write,
- .write_iter = user_events_write_iter,
+ .open		= user_events_open,
+ .write		= user_events_write,
+ .write_iter	= user_events_write_iter,
.unlocked_ioctl = user_events_ioctl,
- .release = user_events_release,
+ .release	= user_events_release,
};
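
On the user-space side of the .write/.write_iter handlers, an event is emitted by prefixing the payload with the write_index returned at registration. A sketch using writev(2); data_fd, the index, and the single u32 payload are the hypothetical values from the registration sketch earlier:

/* Sketch: emit one event; iovec 0 must be the write_index, the rest is
 * the payload matching the registered fields ("u32 count" earlier).
 */
#include <sys/uio.h>
#include <linux/types.h>

static int emit_event_sketch(int data_fd, __u32 write_index, __u32 count)
{
	struct iovec io[2];

	io[0].iov_base = &write_index;
	io[0].iov_len = sizeof(write_index);
	io[1].iov_base = &count;
	io[1].iov_len = sizeof(count);

	return writev(data_fd, io, 2) < 0 ? -1 : 0;
}
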
static void *user_seq_start(struct seq_file *m, loff_t *pos)
@@ -2346,10 +2346,10 @@ static int user_seq_show(struct seq_file *m, void *p)
}
static const struct seq_operations user_seq_ops = {
- .start = user_seq_start,
- .next = user_seq_next,
- .stop = user_seq_stop,
- .show = user_seq_show,
+ .start	= user_seq_start,
+ .next	= user_seq_next,
+ .stop	= user_seq_stop,
+ .show	= user_seq_show,
};
static int user_status_open(struct inode *node, struct file *file)
@@ -2375,10 +2375,10 @@ static int user_status_open(struct inode *node, struct file *file)
}
static const struct file_operations user_status_fops = {
- .open = user_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+ .open		= user_status_open,
+ .read		= seq_read,
+ .llseek		= seq_lseek,
+ .release	= seq_release,
};
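
The status file these operations back is plain text listing each registered event and whether anything has it enabled, so inspecting it from user space is just a read. A tiny sketch, again assuming tracefs at /sys/kernel/tracing:

/* Sketch: dump the user_events_status text file. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t len;
	int fd = open("/sys/kernel/tracing/user_events_status", O_RDONLY);

	if (fd < 0)
		return 1;

	while ((len = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)len, stdout);

	close(fd);
	return 0;
}
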
/*