Diffstat (limited to 'drivers/staging/android')
 drivers/staging/android/Kconfig           |   29
 drivers/staging/android/Makefile          |    2
 drivers/staging/android/ashmem.c          |   66
 drivers/staging/android/ashmem.h          |    7
 drivers/staging/android/binder.c          |  138
 drivers/staging/android/binder.h          |    8
 drivers/staging/android/logger.c          |  196
 drivers/staging/android/logger.h          |   40
 drivers/staging/android/lowmemorykiller.c |    9
 drivers/staging/android/sw_sync.c         |  264
 drivers/staging/android/sw_sync.h         |   58
 drivers/staging/android/sync.c            | 1017
 drivers/staging/android/sync.h            |  426
 drivers/staging/android/trace/sync.h      |   82
14 files changed, 2237 insertions, 105 deletions
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 465a28c08f20..9f61d46da157 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC config ASHMEM bool "Enable the Anonymous Shared Memory Subsystem" default n - depends on SHMEM || TINY_SHMEM + depends on SHMEM ---help--- The ashmem subsystem is a new shared memory allocator, similar to POSIX SHM but with different behavior and sporting a simpler @@ -72,6 +72,33 @@ config ANDROID_INTF_ALARM_DEV elapsed realtime, and a non-wakeup alarm on the monotonic clock. Also exports the alarm interface to user-space. +config SYNC + bool "Synchronization framework" + default n + select ANON_INODES + help + This option enables the framework for synchronization between multiple + drivers. Sync implementations can take advantage of hardware + synchronization built into devices like GPUs. + +config SW_SYNC + bool "Software synchronization objects" + default n + depends on SYNC + help + A sync object driver that uses a 32bit counter to coordinate + syncrhronization. Useful when there is no hardware primitive backing + the synchronization. + +config SW_SYNC_USER + bool "Userspace API for SW_SYNC" + default n + depends on SW_SYNC + help + Provides a user space API to the sw sync object. + *WARNING* improper use of this can result in deadlocking kernel + drivers from userspace. + endif # if ANDROID endmenu diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index b35a631734d6..c136299e05af 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -7,3 +7,5 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o +obj-$(CONFIG_SYNC) += sync.o +obj-$(CONFIG_SW_SYNC) += sw_sync.o diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 634b9ae713e0..e681bdd9aa5f 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -414,20 +414,29 @@ out: static int set_name(struct ashmem_area *asma, void __user *name) { int ret = 0; + char local_name[ASHMEM_NAME_LEN]; - mutex_lock(&ashmem_mutex); + /* + * Holding the ashmem_mutex while doing a copy_from_user might cause + * an data abort which would try to access mmap_sem. If another + * thread has invoked ashmem_mmap then it will be holding the + * semaphore and will be waiting for ashmem_mutex, there by leading to + * deadlock. We'll release the mutex and take the name to a local + * variable that does not need protection and later copy the local + * variable to the structure member with lock held. + */ + if (copy_from_user(local_name, name, ASHMEM_NAME_LEN)) + return -EFAULT; + mutex_lock(&ashmem_mutex); /* cannot change an existing mapping's name */ if (unlikely(asma->file)) { ret = -EINVAL; goto out; } - - if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN, - name, ASHMEM_NAME_LEN))) - ret = -EFAULT; + memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, + local_name, ASHMEM_NAME_LEN); asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; - out: mutex_unlock(&ashmem_mutex); @@ -437,26 +446,36 @@ out: static int get_name(struct ashmem_area *asma, void __user *name) { int ret = 0; + size_t len; + /* + * Have a local variable to which we'll copy the content + * from asma with the lock held. 
Later we can copy this to the user + * space safely without holding any locks. So even if we proceed to + * wait for mmap_sem, it won't lead to deadlock. + */ + char local_name[ASHMEM_NAME_LEN]; mutex_lock(&ashmem_mutex); if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { - size_t len; /* * Copying only `len', instead of ASHMEM_NAME_LEN, bytes * prevents us from revealing one user's stack to another. */ len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; - if (unlikely(copy_to_user(name, - asma->name + ASHMEM_NAME_PREFIX_LEN, len))) - ret = -EFAULT; + memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); } else { - if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF, - sizeof(ASHMEM_NAME_DEF)))) - ret = -EFAULT; + len = sizeof(ASHMEM_NAME_DEF); + memcpy(local_name, ASHMEM_NAME_DEF, len); } mutex_unlock(&ashmem_mutex); + /* + * Now we are just copying from the stack variable to userland + * No lock held + */ + if (unlikely(copy_to_user(name, local_name, len))) + ret = -EFAULT; return ret; } @@ -683,6 +702,23 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } +/* support of 32bit userspace on 64bit platforms */ +#ifdef CONFIG_COMPAT +static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + + switch (cmd) { + case COMPAT_ASHMEM_SET_SIZE: + cmd = ASHMEM_SET_SIZE; + break; + case COMPAT_ASHMEM_SET_PROT_MASK: + cmd = ASHMEM_SET_PROT_MASK; + break; + } + return ashmem_ioctl(file, cmd, arg); +} +#endif + static const struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, @@ -691,7 +727,9 @@ static const struct file_operations ashmem_fops = { .llseek = ashmem_llseek, .mmap = ashmem_mmap, .unlocked_ioctl = ashmem_ioctl, - .compat_ioctl = ashmem_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_ashmem_ioctl, +#endif }; static struct miscdevice ashmem_misc = { diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h index 1976b10ef93e..8dc0f0d3adf3 100644 --- a/drivers/staging/android/ashmem.h +++ b/drivers/staging/android/ashmem.h @@ -14,6 +14,7 @@ #include <linux/limits.h> #include <linux/ioctl.h> +#include <linux/compat.h> #define ASHMEM_NAME_LEN 256 @@ -45,4 +46,10 @@ struct ashmem_pin { #define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) #define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) +/* support of 32bit userspace on 64bit platforms */ +#ifdef CONFIG_COMPAT +#define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t) +#define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int) +#endif + #endif /* _LINUX_ASHMEM_H */ diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 24456a0de6b2..1567ac296b39 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -2878,82 +2878,109 @@ static int binder_release(struct inode *nodp, struct file *filp) return 0; } +static int binder_node_release(struct binder_node *node, int refs) +{ + struct binder_ref *ref; + int death = 0; + + list_del_init(&node->work.entry); + binder_release_work(&node->async_todo); + + if (hlist_empty(&node->refs)) { + kfree(node); + binder_stats_deleted(BINDER_STAT_NODE); + + return refs; + } + + node->proc = NULL; + node->local_strong_refs = 0; + node->local_weak_refs = 0; + hlist_add_head(&node->dead_node, &binder_dead_nodes); + + hlist_for_each_entry(ref, &node->refs, node_entry) { + refs++; + + if (!ref->death) + goto out; + + death++; + + if (list_empty(&ref->death->work.entry)) { + ref->death->work.type = 
BINDER_WORK_DEAD_BINDER; + list_add_tail(&ref->death->work.entry, + &ref->proc->todo); + wake_up_interruptible(&ref->proc->wait); + } else + BUG(); + } + +out: + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "node %d now dead, refs %d, death %d\n", + node->debug_id, refs, death); + + return refs; +} + static void binder_deferred_release(struct binder_proc *proc) { struct binder_transaction *t; struct rb_node *n; - int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; + int threads, nodes, incoming_refs, outgoing_refs, buffers, + active_transactions, page_count; BUG_ON(proc->vma); BUG_ON(proc->files); hlist_del(&proc->proc_node); + if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, - "binder_release: %d context_mgr_node gone\n", - proc->pid); + "%s: %d context_mgr_node gone\n", + __func__, proc->pid); binder_context_mgr_node = NULL; } threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { - struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); + struct binder_thread *thread; + + thread = rb_entry(n, struct binder_thread, rb_node); threads++; active_transactions += binder_free_thread(proc, thread); } + nodes = 0; incoming_refs = 0; while ((n = rb_first(&proc->nodes))) { - struct binder_node *node = rb_entry(n, struct binder_node, rb_node); + struct binder_node *node; + node = rb_entry(n, struct binder_node, rb_node); nodes++; rb_erase(&node->rb_node, &proc->nodes); - list_del_init(&node->work.entry); - binder_release_work(&node->async_todo); - if (hlist_empty(&node->refs)) { - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); - } else { - struct binder_ref *ref; - int death = 0; - - node->proc = NULL; - node->local_strong_refs = 0; - node->local_weak_refs = 0; - hlist_add_head(&node->dead_node, &binder_dead_nodes); - - hlist_for_each_entry(ref, &node->refs, node_entry) { - incoming_refs++; - if (ref->death) { - death++; - if (list_empty(&ref->death->work.entry)) { - ref->death->work.type = BINDER_WORK_DEAD_BINDER; - list_add_tail(&ref->death->work.entry, &ref->proc->todo); - wake_up_interruptible(&ref->proc->wait); - } else - BUG(); - } - } - binder_debug(BINDER_DEBUG_DEAD_BINDER, - "node %d now dead, refs %d, death %d\n", - node->debug_id, incoming_refs, death); - } + incoming_refs = binder_node_release(node, incoming_refs); } + outgoing_refs = 0; while ((n = rb_first(&proc->refs_by_desc))) { - struct binder_ref *ref = rb_entry(n, struct binder_ref, - rb_node_desc); + struct binder_ref *ref; + + ref = rb_entry(n, struct binder_ref, rb_node_desc); outgoing_refs++; binder_delete_ref(ref); } + binder_release_work(&proc->todo); binder_release_work(&proc->delivered_death); - buffers = 0; + buffers = 0; while ((n = rb_first(&proc->allocated_buffers))) { - struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, - rb_node); + struct binder_buffer *buffer; + + buffer = rb_entry(n, struct binder_buffer, rb_node); + t = buffer->transaction; if (t) { t->buffer = NULL; @@ -2962,6 +2989,7 @@ static void binder_deferred_release(struct binder_proc *proc) proc->pid, t->debug_id); /*BUG();*/ } + binder_free_buf(proc, buffer); buffers++; } @@ -2971,18 +2999,20 @@ static void binder_deferred_release(struct binder_proc *proc) page_count = 0; if (proc->pages) { int i; + for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { - if (proc->pages[i]) { - void *page_addr = proc->buffer + i * PAGE_SIZE; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "binder_release: %d: 
page %d at %p not freed\n", - proc->pid, i, - page_addr); - unmap_kernel_range((unsigned long)page_addr, - PAGE_SIZE); - __free_page(proc->pages[i]); - page_count++; - } + void *page_addr; + + if (!proc->pages[i]) + continue; + + page_addr = proc->buffer + i * PAGE_SIZE; + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%s: %d: page %d at %p not freed\n", + __func__, proc->pid, i, page_addr); + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); + __free_page(proc->pages[i]); + page_count++; } kfree(proc->pages); vfree(proc->buffer); @@ -2991,9 +3021,9 @@ static void binder_deferred_release(struct binder_proc *proc) put_task_struct(proc->tsk); binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "binder_release: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", - proc->pid, threads, nodes, incoming_refs, outgoing_refs, - active_transactions, buffers, page_count); + "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", + __func__, proc->pid, threads, nodes, incoming_refs, + outgoing_refs, active_transactions, buffers, page_count); kfree(proc); } diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h index f240464effde..dbe81ceca1bd 100644 --- a/drivers/staging/android/binder.h +++ b/drivers/staging/android/binder.h @@ -85,11 +85,11 @@ struct binder_version { #define BINDER_CURRENT_PROTOCOL_VERSION 7 #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) -#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t) +#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) #define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t) -#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int) -#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int) -#define BINDER_THREAD_EXIT _IOW('b', 8, int) +#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) +#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) +#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) #define BINDER_VERSION _IOWR('b', 9, struct binder_version) /* diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index dbc63cbb4d3a..b14a55742559 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -68,6 +68,8 @@ static LIST_HEAD(log_list); * @log: The associated log * @list: The associated entry in @logger_log's list * @r_off: The current read head offset. + * @r_all: Reader can read all entries + * @r_ver: Reader ABI version * * This object lives from open to release, so we don't need additional * reference counting. The structure is protected by log->mutex. @@ -76,6 +78,8 @@ struct logger_reader { struct logger_log *log; struct list_head list; size_t r_off; + bool r_all; + int r_ver; }; /* logger_offset - returns index 'n' into the log via (optimized) modulus */ @@ -109,8 +113,29 @@ static inline struct logger_log *file_get_log(struct file *file) } /* - * get_entry_len - Grabs the length of the payload of the next entry starting - * from 'off'. + * get_entry_header - returns a pointer to the logger_entry header within + * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must + * be provided. Typically the return value will be a pointer within + * 'logger->buf'. However, a pointer to 'scratch' may be returned if + * the log entry spans the end and beginning of the circular buffer. 
+ */ +static struct logger_entry *get_entry_header(struct logger_log *log, + size_t off, struct logger_entry *scratch) +{ + size_t len = min(sizeof(struct logger_entry), log->size - off); + if (len != sizeof(struct logger_entry)) { + memcpy(((void *) scratch), log->buffer + off, len); + memcpy(((void *) scratch) + len, log->buffer, + sizeof(struct logger_entry) - len); + return scratch; + } + + return (struct logger_entry *) (log->buffer + off); +} + +/* + * get_entry_msg_len - Grabs the length of the message of the entry + * starting from from 'off'. * * An entry length is 2 bytes (16 bits) in host endian order. * In the log, the length does not include the size of the log entry structure. @@ -118,20 +143,45 @@ static inline struct logger_log *file_get_log(struct file *file) * * Caller needs to hold log->mutex. */ -static __u32 get_entry_len(struct logger_log *log, size_t off) +static __u32 get_entry_msg_len(struct logger_log *log, size_t off) { - __u16 val; + struct logger_entry scratch; + struct logger_entry *entry; - /* copy 2 bytes from buffer, in memcpy order, */ - /* handling possible wrap at end of buffer */ + entry = get_entry_header(log, off, &scratch); + return entry->len; +} - ((__u8 *)&val)[0] = log->buffer[off]; - if (likely(off+1 < log->size)) - ((__u8 *)&val)[1] = log->buffer[off+1]; +static size_t get_user_hdr_len(int ver) +{ + if (ver < 2) + return sizeof(struct user_logger_entry_compat); else - ((__u8 *)&val)[1] = log->buffer[0]; + return sizeof(struct logger_entry); +} - return sizeof(struct logger_entry) + val; +static ssize_t copy_header_to_user(int ver, struct logger_entry *entry, + char __user *buf) +{ + void *hdr; + size_t hdr_len; + struct user_logger_entry_compat v1; + + if (ver < 2) { + v1.len = entry->len; + v1.__pad = 0; + v1.pid = entry->pid; + v1.tid = entry->tid; + v1.sec = entry->sec; + v1.nsec = entry->nsec; + hdr = &v1; + hdr_len = sizeof(struct user_logger_entry_compat); + } else { + hdr = entry; + hdr_len = sizeof(struct logger_entry); + } + + return copy_to_user(buf, hdr, hdr_len); } /* @@ -145,15 +195,31 @@ static ssize_t do_read_log_to_user(struct logger_log *log, char __user *buf, size_t count) { + struct logger_entry scratch; + struct logger_entry *entry; size_t len; + size_t msg_start; /* - * We read from the log in two disjoint operations. First, we read from - * the current read head offset up to 'count' bytes or to the end of + * First, copy the header to userspace, using the version of + * the header requested + */ + entry = get_entry_header(log, reader->r_off, &scratch); + if (copy_header_to_user(reader->r_ver, entry, buf)) + return -EFAULT; + + count -= get_user_hdr_len(reader->r_ver); + buf += get_user_hdr_len(reader->r_ver); + msg_start = logger_offset(log, + reader->r_off + sizeof(struct logger_entry)); + + /* + * We read from the msg in two disjoint operations. First, we read from + * the current msg head offset up to 'count' bytes or to the end of * the log, whichever comes first. 
*/ - len = min(count, log->size - reader->r_off); - if (copy_to_user(buf, log->buffer + reader->r_off, len)) + len = min(count, log->size - msg_start); + if (copy_to_user(buf, log->buffer + msg_start, len)) return -EFAULT; /* @@ -164,9 +230,34 @@ static ssize_t do_read_log_to_user(struct logger_log *log, if (copy_to_user(buf + len, log->buffer, count - len)) return -EFAULT; - reader->r_off = logger_offset(log, reader->r_off + count); + reader->r_off = logger_offset(log, reader->r_off + + sizeof(struct logger_entry) + count); - return count; + return count + get_user_hdr_len(reader->r_ver); +} + +/* + * get_next_entry_by_uid - Starting at 'off', returns an offset into + * 'log->buffer' which contains the first entry readable by 'euid' + */ +static size_t get_next_entry_by_uid(struct logger_log *log, + size_t off, uid_t euid) +{ + while (off != log->w_off) { + struct logger_entry *entry; + struct logger_entry scratch; + size_t next_len; + + entry = get_entry_header(log, off, &scratch); + + if (entry->euid == euid) + return off; + + next_len = sizeof(struct logger_entry) + entry->len; + off = logger_offset(log, off + next_len); + } + + return off; } /* @@ -178,7 +269,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log, * - If there are no log entries to read, blocks until log is written to * - Atomically reads exactly one log entry * - * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read + * Will set errno to EINVAL if read * buffer is insufficient to hold next entry. */ static ssize_t logger_read(struct file *file, char __user *buf, @@ -219,6 +310,10 @@ start: mutex_lock(&log->mutex); + if (!reader->r_all) + reader->r_off = get_next_entry_by_uid(log, + reader->r_off, current_euid()); + /* is there still something to read or did we race? 
*/ if (unlikely(log->w_off == reader->r_off)) { mutex_unlock(&log->mutex); @@ -226,7 +321,8 @@ start: } /* get the size of the next entry */ - ret = get_entry_len(log, reader->r_off); + ret = get_user_hdr_len(reader->r_ver) + + get_entry_msg_len(log, reader->r_off); if (count < ret) { ret = -EINVAL; goto out; @@ -252,7 +348,8 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) size_t count = 0; do { - size_t nr = get_entry_len(log, off); + size_t nr = sizeof(struct logger_entry) + + get_entry_msg_len(log, off); off = logger_offset(log, off + nr); count += nr; } while (count < len); @@ -382,7 +479,9 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, header.tid = current->pid; header.sec = now.tv_sec; header.nsec = now.tv_nsec; + header.euid = current_euid(); header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD); + header.hdr_size = sizeof(struct logger_entry); /* null writes succeed, return zero */ if (unlikely(!header.len)) @@ -463,6 +562,10 @@ static int logger_open(struct inode *inode, struct file *file) return -ENOMEM; reader->log = log; + reader->r_ver = 1; + reader->r_all = in_egroup_p(inode->i_gid) || + capable(CAP_SYSLOG); + INIT_LIST_HEAD(&reader->list); mutex_lock(&log->mutex); @@ -522,6 +625,10 @@ static unsigned int logger_poll(struct file *file, poll_table *wait) poll_wait(file, &log->wq, wait); mutex_lock(&log->mutex); + if (!reader->r_all) + reader->r_off = get_next_entry_by_uid(log, + reader->r_off, current_euid()); + if (log->w_off != reader->r_off) ret |= POLLIN | POLLRDNORM; mutex_unlock(&log->mutex); @@ -529,11 +636,25 @@ static unsigned int logger_poll(struct file *file, poll_table *wait) return ret; } +static long logger_set_version(struct logger_reader *reader, void __user *arg) +{ + int version; + if (copy_from_user(&version, arg, sizeof(int))) + return -EFAULT; + + if ((version < 1) || (version > 2)) + return -EINVAL; + + reader->r_ver = version; + return 0; +} + static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct logger_log *log = file_get_log(file); struct logger_reader *reader; - long ret = -ENOTTY; + long ret = -EINVAL; + void __user *argp = (void __user *) arg; mutex_lock(&log->mutex); @@ -558,8 +679,14 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } reader = file->private_data; + + if (!reader->r_all) + reader->r_off = get_next_entry_by_uid(log, + reader->r_off, current_euid()); + if (log->w_off != reader->r_off) - ret = get_entry_len(log, reader->r_off); + ret = get_user_hdr_len(reader->r_ver) + + get_entry_msg_len(log, reader->r_off); else ret = 0; break; @@ -568,11 +695,32 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ret = -EBADF; break; } + if (!(in_egroup_p(file->f_dentry->d_inode->i_gid) || + capable(CAP_SYSLOG))) { + ret = -EPERM; + break; + } list_for_each_entry(reader, &log->readers, list) reader->r_off = log->w_off; log->head = log->w_off; ret = 0; break; + case LOGGER_GET_VERSION: + if (!(file->f_mode & FMODE_READ)) { + ret = -EBADF; + break; + } + reader = file->private_data; + ret = reader->r_ver; + break; + case LOGGER_SET_VERSION: + if (!(file->f_mode & FMODE_READ)) { + ret = -EBADF; + break; + } + reader = file->private_data; + ret = logger_set_version(reader, argp); + break; } mutex_unlock(&log->mutex); @@ -592,8 +740,8 @@ static const struct file_operations logger_fops = { }; /* - * Log size must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, - * 
and less than LONG_MAX minus LOGGER_ENTRY_MAX_LEN. + * Log size must must be a power of two, and greater than + * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)). */ static int __init create_log(char *log_name, int size) { diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h index 9b929a8c7468..cc6bbd99c8e0 100644 --- a/drivers/staging/android/logger.h +++ b/drivers/staging/android/logger.h @@ -21,7 +21,7 @@ #include <linux/ioctl.h> /** - * struct logger_entry - defines a single entry that is given to a logger + * struct user_logger_entry_compat - defines a single entry that is given to a logger * @len: The length of the payload * @__pad: Two bytes of padding that appear to be required * @pid: The generating process' process ID @@ -29,8 +29,12 @@ * @sec: The number of seconds that have elapsed since the Epoch * @nsec: The number of nanoseconds that have elapsed since @sec * @msg: The message that is to be logged + * + * The userspace structure for version 1 of the logger_entry ABI. + * This structure is returned to userspace unless the caller requests + * an upgrade to a newer ABI version. */ -struct logger_entry { +struct user_logger_entry_compat { __u16 len; __u16 __pad; __s32 pid; @@ -40,14 +44,38 @@ struct logger_entry { char msg[0]; }; +/** + * struct logger_entry - defines a single entry that is given to a logger + * @len: The length of the payload + * @hdr_size: sizeof(struct logger_entry_v2) + * @pid: The generating process' process ID + * @tid: The generating process' thread ID + * @sec: The number of seconds that have elapsed since the Epoch + * @nsec: The number of nanoseconds that have elapsed since @sec + * @euid: Effective UID of logger + * @msg: The message that is to be logged + * + * The structure for version 2 of the logger_entry ABI. 
+ * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION) + * is called with version >= 2 + */ +struct logger_entry { + __u16 len; + __u16 hdr_size; + __s32 pid; + __s32 tid; + __s32 sec; + __s32 nsec; + uid_t euid; + char msg[0]; +}; + #define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ #define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ #define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */ #define LOGGER_LOG_MAIN "log_main" /* everything else */ -#define LOGGER_ENTRY_MAX_LEN (4*1024) -#define LOGGER_ENTRY_MAX_PAYLOAD \ - (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry)) +#define LOGGER_ENTRY_MAX_PAYLOAD 4076 #define __LOGGERIO 0xAE @@ -55,5 +83,7 @@ struct logger_entry { #define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ #define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ #define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ +#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5) /* abi version */ +#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6) /* abi version */ #endif /* _LINUX_LOGGER_H */ diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 3b91b0fd4de3..fe74494868ef 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -30,16 +30,19 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/oom.h> #include <linux/sched.h> +#include <linux/swap.h> #include <linux/rcupdate.h> #include <linux/profile.h> #include <linux/notifier.h> -static uint32_t lowmem_debug_level = 2; +static uint32_t lowmem_debug_level = 1; static short lowmem_adj[6] = { 0, 1, @@ -60,7 +63,7 @@ static unsigned long lowmem_deathpending_timeout; #define lowmem_print(level, x...) \ do { \ if (lowmem_debug_level >= (level)) \ - printk(x); \ + pr_info(x); \ } while (0) static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) @@ -74,7 +77,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) int selected_tasksize = 0; short selected_oom_score_adj; int array_size = ARRAY_SIZE(lowmem_adj); - int other_free = global_page_state(NR_FREE_PAGES); + int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages; int other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM); diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c new file mode 100644 index 000000000000..4928f93bdf3d --- /dev/null +++ b/drivers/staging/android/sw_sync.c @@ -0,0 +1,264 @@ +/* + * drivers/base/sw_sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> + +#include "sw_sync.h" + +static int sw_sync_cmp(u32 a, u32 b) +{ + if (a == b) + return 0; + + return ((s32)a - (s32)b) < 0 ? 
-1 : 1; +} + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +{ + struct sw_sync_pt *pt; + + pt = (struct sw_sync_pt *) + sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt)); + + pt->value = value; + + return (struct sync_pt *)pt; +} +EXPORT_SYMBOL(sw_sync_pt_create); + +static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); +} + +static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return sw_sync_cmp(obj->value, pt->value) >= 0; +} + +static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ + struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; + struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; + + return sw_sync_cmp(pt_a->value, pt_b->value); +} + +static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, + void *data, int size) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + + if (size < sizeof(pt->value)) + return -ENOMEM; + + memcpy(data, &pt->value, sizeof(pt->value)); + + return sizeof(pt->value); +} + +static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline, + char *str, int size) +{ + struct sw_sync_timeline *timeline = + (struct sw_sync_timeline *)sync_timeline; + snprintf(str, size, "%d", timeline->value); +} + +static void sw_sync_pt_value_str(struct sync_pt *sync_pt, + char *str, int size) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + snprintf(str, size, "%d", pt->value); +} + +static struct sync_timeline_ops sw_sync_timeline_ops = { + .driver_name = "sw_sync", + .dup = sw_sync_pt_dup, + .has_signaled = sw_sync_pt_has_signaled, + .compare = sw_sync_pt_compare, + .fill_driver_data = sw_sync_fill_driver_data, + .timeline_value_str = sw_sync_timeline_value_str, + .pt_value_str = sw_sync_pt_value_str, +}; + + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *) + sync_timeline_create(&sw_sync_timeline_ops, + sizeof(struct sw_sync_timeline), + name); + + return obj; +} +EXPORT_SYMBOL(sw_sync_timeline_create); + +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ + obj->value += inc; + + sync_timeline_signal(&obj->obj); +} +EXPORT_SYMBOL(sw_sync_timeline_inc); + +#ifdef CONFIG_SW_SYNC_USER +/* *WARNING* + * + * improper use of this can result in deadlocking kernel drivers from userspace. 
+ */ + +/* opening sw_sync create a new sync obj */ +static int sw_sync_open(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj; + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, current); + + obj = sw_sync_timeline_create(task_comm); + if (obj == NULL) + return -ENOMEM; + + file->private_data = obj; + + return 0; +} + +static int sw_sync_release(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj = file->private_data; + sync_timeline_destroy(&obj->obj); + return 0; +} + +static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_pt *pt; + struct sync_fence *fence; + struct sw_sync_create_fence_data data; + + if (fd < 0) + return fd; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + err = -EFAULT; + goto err; + } + + pt = sw_sync_pt_create(obj, data.value); + if (pt == NULL) { + err = -ENOMEM; + goto err; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence = sync_fence_create(data.name, pt); + if (fence == NULL) { + sync_pt_free(pt); + err = -ENOMEM; + goto err; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + sync_fence_put(fence); + err = -EFAULT; + goto err; + } + + sync_fence_install(fence, fd); + + return 0; + +err: + put_unused_fd(fd); + return err; +} + +static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ + u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + sw_sync_timeline_inc(obj, value); + + return 0; +} + +static long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sw_sync_timeline *obj = file->private_data; + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + return sw_sync_ioctl_create_fence(obj, arg); + + case SW_SYNC_IOC_INC: + return sw_sync_ioctl_inc(obj, arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sw_sync_fops = { + .owner = THIS_MODULE, + .open = sw_sync_open, + .release = sw_sync_release, + .unlocked_ioctl = sw_sync_ioctl, + .compat_ioctl = sw_sync_ioctl, +}; + +static struct miscdevice sw_sync_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw_sync", + .fops = &sw_sync_fops, +}; + +static int __init sw_sync_device_init(void) +{ + return misc_register(&sw_sync_dev); +} + +static void __exit sw_sync_device_remove(void) +{ + misc_deregister(&sw_sync_dev); +} + +module_init(sw_sync_device_init); +module_exit(sw_sync_device_remove); + +#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h new file mode 100644 index 000000000000..585040be5f18 --- /dev/null +++ b/drivers/staging/android/sw_sync.h @@ -0,0 +1,58 @@ +/* + * include/linux/sw_sync.h + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_SW_SYNC_H +#define _LINUX_SW_SYNC_H + +#include <linux/types.h> + +#ifdef __KERNEL__ + +#include "sync.h" + +struct sw_sync_timeline { + struct sync_timeline obj; + + u32 value; +}; + +struct sw_sync_pt { + struct sync_pt pt; + + u32 value; +}; + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name); +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); + +#endif /* __KERNEL __ */ + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; /* fd of new fence */ +}; + +#define SW_SYNC_IOC_MAGIC 'W' + +#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ + struct sw_sync_create_fence_data) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + + +#endif /* _LINUX_SW_SYNC_H */ diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c new file mode 100644 index 000000000000..3893a3574769 --- /dev/null +++ b/drivers/staging/android/sync.c @@ -0,0 +1,1017 @@ +/* + * drivers/base/sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/anon_inodes.h> + +#include "sync.h" + +#define CREATE_TRACE_POINTS +#include "trace/sync.h" + +static void sync_fence_signal_pt(struct sync_pt *pt); +static int _sync_pt_has_signaled(struct sync_pt *pt); +static void sync_fence_free(struct kref *kref); +static void sync_dump(void); + +static LIST_HEAD(sync_timeline_list_head); +static DEFINE_SPINLOCK(sync_timeline_list_lock); + +static LIST_HEAD(sync_fence_list_head); +static DEFINE_SPINLOCK(sync_fence_list_lock); + +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name) +{ + struct sync_timeline *obj; + unsigned long flags; + + if (size < sizeof(struct sync_timeline)) + return NULL; + + obj = kzalloc(size, GFP_KERNEL); + if (obj == NULL) + return NULL; + + kref_init(&obj->kref); + obj->ops = ops; + strlcpy(obj->name, name, sizeof(obj->name)); + + INIT_LIST_HEAD(&obj->child_list_head); + spin_lock_init(&obj->child_list_lock); + + INIT_LIST_HEAD(&obj->active_list_head); + spin_lock_init(&obj->active_list_lock); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + return obj; +} +EXPORT_SYMBOL(sync_timeline_create); + +static void sync_timeline_free(struct kref *kref) +{ + struct sync_timeline *obj = + container_of(kref, struct sync_timeline, kref); + unsigned long flags; + + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_del(&obj->sync_timeline_list); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + kfree(obj); +} + +void 
sync_timeline_destroy(struct sync_timeline *obj) +{ + obj->destroyed = true; + + /* + * If this is not the last reference, signal any children + * that their parent is going away. + */ + + if (!kref_put(&obj->kref, sync_timeline_free)) + sync_timeline_signal(obj); +} +EXPORT_SYMBOL(sync_timeline_destroy); + +static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) +{ + unsigned long flags; + + pt->parent = obj; + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_add_tail(&pt->child_list, &obj->child_list_head); + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_timeline_remove_pt(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + + spin_lock_irqsave(&obj->active_list_lock, flags); + if (!list_empty(&pt->active_list)) + list_del_init(&pt->active_list); + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + spin_lock_irqsave(&obj->child_list_lock, flags); + if (!list_empty(&pt->child_list)) { + list_del_init(&pt->child_list); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +void sync_timeline_signal(struct sync_timeline *obj) +{ + unsigned long flags; + LIST_HEAD(signaled_pts); + struct list_head *pos, *n; + + trace_sync_timeline(obj); + + spin_lock_irqsave(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &obj->active_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, active_list); + + if (_sync_pt_has_signaled(pt)) { + list_del_init(pos); + list_add(&pt->signaled_list, &signaled_pts); + kref_get(&pt->fence->kref); + } + } + + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &signaled_pts) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, signaled_list); + + list_del_init(pos); + sync_fence_signal_pt(pt); + kref_put(&pt->fence->kref, sync_fence_free); + } +} +EXPORT_SYMBOL(sync_timeline_signal); + +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) +{ + struct sync_pt *pt; + + if (size < sizeof(struct sync_pt)) + return NULL; + + pt = kzalloc(size, GFP_KERNEL); + if (pt == NULL) + return NULL; + + INIT_LIST_HEAD(&pt->active_list); + kref_get(&parent->kref); + sync_timeline_add_pt(parent, pt); + + return pt; +} +EXPORT_SYMBOL(sync_pt_create); + +void sync_pt_free(struct sync_pt *pt) +{ + if (pt->parent->ops->free_pt) + pt->parent->ops->free_pt(pt); + + sync_timeline_remove_pt(pt); + + kref_put(&pt->parent->kref, sync_timeline_free); + + kfree(pt); +} +EXPORT_SYMBOL(sync_pt_free); + +/* call with pt->parent->active_list_lock held */ +static int _sync_pt_has_signaled(struct sync_pt *pt) +{ + int old_status = pt->status; + + if (!pt->status) + pt->status = pt->parent->ops->has_signaled(pt); + + if (!pt->status && pt->parent->destroyed) + pt->status = -ENOENT; + + if (pt->status != old_status) + pt->timestamp = ktime_get(); + + return pt->status; +} + +static struct sync_pt *sync_pt_dup(struct sync_pt *pt) +{ + return pt->parent->ops->dup(pt); +} + +/* Adds a sync pt to the active queue. 
Called when added to a fence */ +static void sync_pt_activate(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + int err; + + spin_lock_irqsave(&obj->active_list_lock, flags); + + err = _sync_pt_has_signaled(pt); + if (err != 0) + goto out; + + list_add_tail(&pt->active_list, &obj->active_list_head); + +out: + spin_unlock_irqrestore(&obj->active_list_lock, flags); +} + +static int sync_fence_release(struct inode *inode, struct file *file); +static unsigned int sync_fence_poll(struct file *file, poll_table *wait); +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + + +static const struct file_operations sync_fence_fops = { + .release = sync_fence_release, + .poll = sync_fence_poll, + .unlocked_ioctl = sync_fence_ioctl, + .compat_ioctl = sync_fence_ioctl, +}; + +static struct sync_fence *sync_fence_alloc(const char *name) +{ + struct sync_fence *fence; + unsigned long flags; + + fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); + if (fence == NULL) + return NULL; + + fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, + fence, 0); + if (IS_ERR(fence->file)) + goto err; + + kref_init(&fence->kref); + strlcpy(fence->name, name, sizeof(fence->name)); + + INIT_LIST_HEAD(&fence->pt_list_head); + INIT_LIST_HEAD(&fence->waiter_list_head); + spin_lock_init(&fence->waiter_list_lock); + + init_waitqueue_head(&fence->wq); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + return fence; + +err: + kfree(fence); + return NULL; +} + +/* TODO: implement a create which takes more that one sync_pt */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) +{ + struct sync_fence *fence; + + if (pt->fence) + return NULL; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + pt->fence = fence; + list_add(&pt->pt_list, &fence->pt_list_head); + sync_pt_activate(pt); + + /* + * signal the fence in case pt was activated before + * sync_pt_activate(pt) was called + */ + sync_fence_signal_pt(pt); + + return fence; +} +EXPORT_SYMBOL(sync_fence_create); + +static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) +{ + struct list_head *pos; + + list_for_each(pos, &src->pt_list_head) { + struct sync_pt *orig_pt = + container_of(pos, struct sync_pt, pt_list); + struct sync_pt *new_pt = sync_pt_dup(orig_pt); + + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_add(&new_pt->pt_list, &dst->pt_list_head); + } + + return 0; +} + +static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) +{ + struct list_head *src_pos, *dst_pos, *n; + + list_for_each(src_pos, &src->pt_list_head) { + struct sync_pt *src_pt = + container_of(src_pos, struct sync_pt, pt_list); + bool collapsed = false; + + list_for_each_safe(dst_pos, n, &dst->pt_list_head) { + struct sync_pt *dst_pt = + container_of(dst_pos, struct sync_pt, pt_list); + /* collapse two sync_pts on the same timeline + * to a single sync_pt that will signal at + * the later of the two + */ + if (dst_pt->parent == src_pt->parent) { + if (dst_pt->parent->ops->compare(dst_pt, src_pt) + == -1) { + struct sync_pt *new_pt = + sync_pt_dup(src_pt); + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_replace(&dst_pt->pt_list, + &new_pt->pt_list); + sync_pt_free(dst_pt); + } + collapsed = true; + break; + } + } + + if (!collapsed) { + struct sync_pt 
*new_pt = sync_pt_dup(src_pt); + + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_add(&new_pt->pt_list, &dst->pt_list_head); + } + } + + return 0; +} + +static void sync_fence_detach_pts(struct sync_fence *fence) +{ + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_timeline_remove_pt(pt); + } +} + +static void sync_fence_free_pts(struct sync_fence *fence) +{ + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_pt_free(pt); + } +} + +struct sync_fence *sync_fence_fdget(int fd) +{ + struct file *file = fget(fd); + + if (file == NULL) + return NULL; + + if (file->f_op != &sync_fence_fops) + goto err; + + return file->private_data; + +err: + fput(file); + return NULL; +} +EXPORT_SYMBOL(sync_fence_fdget); + +void sync_fence_put(struct sync_fence *fence) +{ + fput(fence->file); +} +EXPORT_SYMBOL(sync_fence_put); + +void sync_fence_install(struct sync_fence *fence, int fd) +{ + fd_install(fd, fence->file); +} +EXPORT_SYMBOL(sync_fence_install); + +static int sync_fence_get_status(struct sync_fence *fence) +{ + struct list_head *pos; + int status = 1; + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + int pt_status = pt->status; + + if (pt_status < 0) { + status = pt_status; + break; + } else if (status == 1) { + status = pt_status; + } + } + + return status; +} + +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct sync_fence *b) +{ + struct sync_fence *fence; + struct list_head *pos; + int err; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + err = sync_fence_copy_pts(fence, a); + if (err < 0) + goto err; + + err = sync_fence_merge_pts(fence, b); + if (err < 0) + goto err; + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + sync_pt_activate(pt); + } + + /* + * signal the fence in case one of it's pts were activated before + * they were activated + */ + sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, + struct sync_pt, + pt_list)); + + return fence; +err: + sync_fence_free_pts(fence); + kfree(fence); + return NULL; +} +EXPORT_SYMBOL(sync_fence_merge); + +static void sync_fence_signal_pt(struct sync_pt *pt) +{ + LIST_HEAD(signaled_waiters); + struct sync_fence *fence = pt->fence; + struct list_head *pos; + struct list_head *n; + unsigned long flags; + int status; + + status = sync_fence_get_status(fence); + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + /* + * this should protect against two threads racing on the signaled + * false -> true transition + */ + if (status && !fence->status) { + list_for_each_safe(pos, n, &fence->waiter_list_head) + list_move(pos, &signaled_waiters); + + fence->status = status; + } else { + status = 0; + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + if (status) { + list_for_each_safe(pos, n, &signaled_waiters) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + list_del(pos); + waiter->callback(fence, waiter); + } + wake_up(&fence->wq); + } +} + +int sync_fence_wait_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter) +{ + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + + if (fence->status) { + err = 
fence->status; + goto out; + } + + list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); +out: + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + return err; +} +EXPORT_SYMBOL(sync_fence_wait_async); + +int sync_fence_cancel_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter) +{ + struct list_head *pos; + struct list_head *n; + unsigned long flags; + int ret = -ENOENT; + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + /* + * Make sure waiter is still in waiter_list because it is possible for + * the waiter to be removed from the list while the callback is still + * pending. + */ + list_for_each_safe(pos, n, &fence->waiter_list_head) { + struct sync_fence_waiter *list_waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + if (list_waiter == waiter) { + list_del(pos); + ret = 0; + break; + } + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + return ret; +} +EXPORT_SYMBOL(sync_fence_cancel_async); + +static bool sync_fence_check(struct sync_fence *fence) +{ + /* + * Make sure that reads to fence->status are ordered with the + * wait queue event triggering + */ + smp_rmb(); + return fence->status != 0; +} + +int sync_fence_wait(struct sync_fence *fence, long timeout) +{ + int err = 0; + struct sync_pt *pt; + + trace_sync_wait(fence, 1); + list_for_each_entry(pt, &fence->pt_list_head, pt_list) + trace_sync_pt(pt); + + if (timeout > 0) { + timeout = msecs_to_jiffies(timeout); + err = wait_event_interruptible_timeout(fence->wq, + sync_fence_check(fence), + timeout); + } else if (timeout < 0) { + err = wait_event_interruptible(fence->wq, + sync_fence_check(fence)); + } + trace_sync_wait(fence, 0); + + if (err < 0) + return err; + + if (fence->status < 0) { + pr_info("fence error %d on [%p]\n", fence->status, fence); + sync_dump(); + return fence->status; + } + + if (fence->status == 0) { + if (timeout > 0) { + pr_info("fence timeout on [%p] after %dms\n", fence, + jiffies_to_msecs(timeout)); + sync_dump(); + } + return -ETIME; + } + + return 0; +} +EXPORT_SYMBOL(sync_fence_wait); + +static void sync_fence_free(struct kref *kref) +{ + struct sync_fence *fence = container_of(kref, struct sync_fence, kref); + + sync_fence_free_pts(fence); + + kfree(fence); +} + +static int sync_fence_release(struct inode *inode, struct file *file) +{ + struct sync_fence *fence = file->private_data; + unsigned long flags; + + /* + * We need to remove all ways to access this fence before droping + * our ref. + * + * start with its membership in the global fence list + */ + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_del(&fence->sync_fence_list); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + /* + * remove its pts from their parents so that sync_timeline_signal() + * can't reference the fence. 
+ */ + sync_fence_detach_pts(fence); + + kref_put(&fence->kref, sync_fence_free); + + return 0; +} + +static unsigned int sync_fence_poll(struct file *file, poll_table *wait) +{ + struct sync_fence *fence = file->private_data; + + poll_wait(file, &fence->wq, wait); + + /* + * Make sure that reads to fence->status are ordered with the + * wait queue event triggering + */ + smp_rmb(); + + if (fence->status == 1) + return POLLIN; + else if (fence->status < 0) + return POLLERR; + else + return 0; +} + +static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) +{ + __s32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + return sync_fence_wait(fence, value); +} + +static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_fence *fence2, *fence3; + struct sync_merge_data data; + + if (fd < 0) + return fd; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + err = -EFAULT; + goto err_put_fd; + } + + fence2 = sync_fence_fdget(data.fd2); + if (fence2 == NULL) { + err = -ENOENT; + goto err_put_fd; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence3 = sync_fence_merge(data.name, fence, fence2); + if (fence3 == NULL) { + err = -ENOMEM; + goto err_put_fence2; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + err = -EFAULT; + goto err_put_fence3; + } + + sync_fence_install(fence3, fd); + sync_fence_put(fence2); + return 0; + +err_put_fence3: + sync_fence_put(fence3); + +err_put_fence2: + sync_fence_put(fence2); + +err_put_fd: + put_unused_fd(fd); + return err; +} + +static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) +{ + struct sync_pt_info *info = data; + int ret; + + if (size < sizeof(struct sync_pt_info)) + return -ENOMEM; + + info->len = sizeof(struct sync_pt_info); + + if (pt->parent->ops->fill_driver_data) { + ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, + size - sizeof(*info)); + if (ret < 0) + return ret; + + info->len += ret; + } + + strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); + strlcpy(info->driver_name, pt->parent->ops->driver_name, + sizeof(info->driver_name)); + info->status = pt->status; + info->timestamp_ns = ktime_to_ns(pt->timestamp); + + return info->len; +} + +static long sync_fence_ioctl_fence_info(struct sync_fence *fence, + unsigned long arg) +{ + struct sync_fence_info_data *data; + struct list_head *pos; + __u32 size; + __u32 len = 0; + int ret; + + if (copy_from_user(&size, (void __user *)arg, sizeof(size))) + return -EFAULT; + + if (size < sizeof(struct sync_fence_info_data)) + return -EINVAL; + + if (size > 4096) + size = 4096; + + data = kzalloc(size, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + strlcpy(data->name, fence->name, sizeof(data->name)); + data->status = fence->status; + len = sizeof(struct sync_fence_info_data); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + + ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); + + if (ret < 0) + goto out; + + len += ret; + } + + data->len = len; + + if (copy_to_user((void __user *)arg, data, len)) + ret = -EFAULT; + else + ret = 0; + +out: + kfree(data); + + return ret; +} + +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sync_fence *fence = file->private_data; + switch (cmd) { + case SYNC_IOC_WAIT: + return 
sync_fence_ioctl_wait(fence, arg); + + case SYNC_IOC_MERGE: + return sync_fence_ioctl_merge(fence, arg); + + case SYNC_IOC_FENCE_INFO: + return sync_fence_ioctl_fence_info(fence, arg); + + default: + return -ENOTTY; + } +} + +#ifdef CONFIG_DEBUG_FS +static const char *sync_status_str(int status) +{ + if (status > 0) + return "signaled"; + else if (status == 0) + return "active"; + else + return "error"; +} + +static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) +{ + int status = pt->status; + seq_printf(s, " %s%spt %s", + fence ? pt->parent->name : "", + fence ? "_" : "", + sync_status_str(status)); + if (pt->status) { + struct timeval tv = ktime_to_timeval(pt->timestamp); + seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); + } + + if (pt->parent->ops->timeline_value_str && + pt->parent->ops->pt_value_str) { + char value[64]; + pt->parent->ops->pt_value_str(pt, value, sizeof(value)); + seq_printf(s, ": %s", value); + if (fence) { + pt->parent->ops->timeline_value_str(pt->parent, value, + sizeof(value)); + seq_printf(s, " / %s", value); + } + } else if (pt->parent->ops->print_pt) { + seq_printf(s, ": "); + pt->parent->ops->print_pt(s, pt); + } + + seq_printf(s, "\n"); +} + +static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); + + if (obj->ops->timeline_value_str) { + char value[64]; + obj->ops->timeline_value_str(obj, value, sizeof(value)); + seq_printf(s, ": %s", value); + } else if (obj->ops->print_obj) { + seq_printf(s, ": "); + obj->ops->print_obj(s, obj); + } + + seq_printf(s, "\n"); + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_for_each(pos, &obj->child_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, child_list); + sync_print_pt(s, pt, false); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "[%p] %s: %s\n", fence, fence->name, + sync_status_str(fence->status)); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + sync_print_pt(s, pt, true); + } + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + list_for_each(pos, &fence->waiter_list_head) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + seq_printf(s, "waiter %pF\n", waiter->callback); + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); +} + +static int sync_debugfs_show(struct seq_file *s, void *unused) +{ + unsigned long flags; + struct list_head *pos; + + seq_printf(s, "objs:\n--------------\n"); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_for_each(pos, &sync_timeline_list_head) { + struct sync_timeline *obj = + container_of(pos, struct sync_timeline, + sync_timeline_list); + + sync_print_obj(s, obj); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + seq_printf(s, "fences:\n--------------\n"); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_for_each(pos, &sync_fence_list_head) { + struct sync_fence *fence = + container_of(pos, struct sync_fence, sync_fence_list); + + sync_print_fence(s, fence); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + return 0; +} + +static int sync_debugfs_open(struct inode *inode, struct file *file) +{ + return 
single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+	.open           = sync_debugfs_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+	return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+	struct seq_file s = {
+		.buf = sync_dump_buf,
+		.size = sizeof(sync_dump_buf) - 1,
+	};
+	int i;
+
+	sync_debugfs_show(&s, NULL);
+
+	for (i = 0; i < s.count; i += DUMP_CHUNK) {
+		if ((s.count - i) > DUMP_CHUNK) {
+			char c = s.buf[i + DUMP_CHUNK];
+			s.buf[i + DUMP_CHUNK] = 0;
+			pr_cont("%s", s.buf + i);
+			s.buf[i + DUMP_CHUNK] = c;
+		} else {
+			s.buf[s.count] = 0;
+			pr_cont("%s", s.buf + i);
+		}
+	}
+}
+#else
+static void sync_dump(void)
+{
+}
+#endif
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
new file mode 100644
index 000000000000..38ea986dc70f
--- /dev/null
+++ b/drivers/staging/android/sync.h
@@ -0,0 +1,426 @@
+/*
+ * include/linux/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/types.h>
+#ifdef __KERNEL__
+
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+struct sync_timeline;
+struct sync_pt;
+struct sync_fence;
+
+/**
+ * struct sync_timeline_ops - sync object implementation ops
+ * @driver_name: name of the implementation
+ * @dup: duplicate a sync_pt
+ * @has_signaled: returns:
+ *		 1 if pt has signaled
+ *		 0 if pt has not signaled
+ *		<0 on error
+ * @compare: returns:
+ *		 1 if b will signal before a
+ *		 0 if a and b will signal at the same time
+ *		-1 if a will signal before b
+ * @free_pt: called before sync_pt is freed
+ * @release_obj: called before sync_timeline is freed
+ * @print_obj: deprecated
+ * @print_pt: deprecated
+ * @fill_driver_data: write implementation-specific driver data to data.
+ *		Should return an error if there is not enough room
+ *		as specified by size.  This information is returned
+ *		to userspace by SYNC_IOC_FENCE_INFO.
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter
+ * @pt_value_str: fill str with the value of the sync_pt
+ */
+struct sync_timeline_ops {
+	const char *driver_name;
+
+	/* required */
+	struct sync_pt *(*dup)(struct sync_pt *pt);
+
+	/* required */
+	int (*has_signaled)(struct sync_pt *pt);
+
+	/* required */
+	int (*compare)(struct sync_pt *a, struct sync_pt *b);
+
+	/* optional */
+	void (*free_pt)(struct sync_pt *sync_pt);
+
+	/* optional */
+	void (*release_obj)(struct sync_timeline *sync_timeline);
+
+	/* deprecated */
+	void (*print_obj)(struct seq_file *s,
+			  struct sync_timeline *sync_timeline);
+
+	/* deprecated */
+	void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
+
+	/* optional */
+	int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+
+	/* optional */
+	void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
+				   int size);
+
+	/* optional */
+	void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
+};
+
+/**
+ * struct sync_timeline - sync object
+ * @kref: reference count on fence.
+ * @ops: ops that define the implementation of the sync_timeline
+ * @name: name of the sync_timeline. Useful for debugging
+ * @destroyed: set when sync_timeline is destroyed
+ * @child_list_head: list of children sync_pts for this sync_timeline
+ * @child_list_lock: lock protecting @child_list_head, @destroyed, and
+ *		sync_pt.status
+ * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+	struct kref kref;
+	const struct sync_timeline_ops *ops;
+	char name[32];
+
+	/* protected by child_list_lock */
+	bool destroyed;
+
+	struct list_head child_list_head;
+	spinlock_t child_list_lock;
+
+	struct list_head active_list_head;
+	spinlock_t active_list_lock;
+
+	struct list_head sync_timeline_list;
+};
+
+/**
+ * struct sync_pt - sync point
+ * @parent: sync_timeline to which this sync_pt belongs
+ * @child_list: membership in sync_timeline.child_list_head
+ * @active_list: membership in sync_timeline.active_list_head
+ * @signaled_list: membership in a temporary signaled_list on the stack
+ * @fence: sync_fence to which the sync_pt belongs
+ * @pt_list: membership in sync_fence.pt_list_head
+ * @status: 1: signaled, 0: active, <0: error
+ * @timestamp: time at which the sync_pt status transitioned from active to
+ *		signaled or error.
+ */
+struct sync_pt {
+	struct sync_timeline *parent;
+	struct list_head child_list;
+
+	struct list_head active_list;
+	struct list_head signaled_list;
+
+	struct sync_fence *fence;
+	struct list_head pt_list;
+
+	/* protected by parent->active_list_lock */
+	int status;
+
+	ktime_t timestamp;
+};
+
+/**
+ * struct sync_fence - sync fence
+ * @file: file representing this fence
+ * @kref: reference count on fence.
+ * @name: name of sync_fence. Useful for debugging
+ * @pt_list_head: list of sync_pts in this fence.
+ *		Immutable once the fence is created.
+ * @waiter_list_head: list of asynchronous waiters on this fence
+ * @waiter_list_lock: lock protecting @waiter_list_head and @status
+ * @status: 1: signaled, 0: active, <0: error
+ *
+ * @wq: wait queue for fence signaling
+ * @sync_fence_list: membership in global fence list
+ */
+struct sync_fence {
+	struct file *file;
+	struct kref kref;
+	char name[32];
+
+	/* this list is immutable once the fence is created */
+	struct list_head pt_list_head;
+
+	struct list_head waiter_list_head;
+	spinlock_t waiter_list_lock; /* also protects status */
+	int status;
+
+	wait_queue_head_t wq;
+
+	struct list_head sync_fence_list;
+};
+
+struct sync_fence_waiter;
+typedef void (*sync_callback_t)(struct sync_fence *fence,
+				struct sync_fence_waiter *waiter);
+
+/**
+ * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
+ * @waiter_list: membership in sync_fence.waiter_list_head
+ * @callback: function pointer to call when fence signals
+ */
+struct sync_fence_waiter {
+	struct list_head waiter_list;
+
+	sync_callback_t callback;
+};
+
+static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
+					  sync_callback_t callback)
+{
+	waiter->callback = callback;
+}
+
+/*
+ * API for sync_timeline implementers
+ */
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @ops: specifies the implementation ops for the object
+ * @size: size to allocate for this obj
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops. @size bytes will be allocated, allowing for implementation-specific
+ * data to be kept after the generic sync_timeline struct.
+ */
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+					   int size, const char *name);
+
+/**
+ * sync_timeline_destroy() - destroys a sync object
+ * @obj: sync_timeline to destroy
+ *
+ * A sync implementation should call this when @obj is going away
+ * (i.e. module unload). @obj won't actually be freed until all its children
+ * sync_pts are freed.
+ */
+void sync_timeline_destroy(struct sync_timeline *obj);
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ *
+ * A sync implementation should call this any time one of its sync_pts
+ * has signaled or has an error condition.
+ */
+void sync_timeline_signal(struct sync_timeline *obj);
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent: sync_pt's parent sync_timeline
+ * @size: size to allocate for this pt
+ *
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated, allowing for implementation-specific data to be kept after
+ * the generic sync_pt struct.
+ */
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
+
+/**
+ * sync_pt_free() - frees a sync pt
+ * @pt: sync_pt to free
+ *
+ * This should only be called on sync_pts which have been created but
+ * not added to a fence.
+ */
+void sync_pt_free(struct sync_pt *pt);
+
+/**
+ * sync_fence_create() - creates a sync fence
+ * @name: name of fence to create
+ * @pt: sync_pt to add to the fence
+ *
+ * Creates a fence containing @pt. Once this is called, the fence takes
+ * ownership of @pt.
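+ *
+ * A minimal usage sketch (hypothetical driver code; "tl" is assumed to be a
+ * sync_timeline the driver created earlier with sync_timeline_create()):
+ *
+ *	struct sync_pt *pt = sync_pt_create(tl, sizeof(*pt));
+ *	struct sync_fence *fence = sync_fence_create("mydev-fence", pt);
+ *
+ * If fence creation fails, @pt was never added to a fence, so the driver
+ * would release it with sync_pt_free(pt).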
+ */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
+
+/*
+ * API for sync_fence consumers
+ */
+
+/**
+ * sync_fence_merge() - merge two fences
+ * @name: name of new fence
+ * @a: fence a
+ * @b: fence b
+ *
+ * Creates a new fence which contains copies of all the sync_pts in both
+ * @a and @b. @a and @b remain valid, independent fences.
+ */
+struct sync_fence *sync_fence_merge(const char *name,
+				    struct sync_fence *a, struct sync_fence *b);
+
+/**
+ * sync_fence_fdget() - get a fence from an fd
+ * @fd: fd referencing a fence
+ *
+ * Ensures @fd references a valid fence, increments the refcount of the backing
+ * file, and returns the fence.
+ */
+struct sync_fence *sync_fence_fdget(int fd);
+
+/**
+ * sync_fence_put() - puts a reference on a sync fence
+ * @fence: fence to put
+ *
+ * Puts a reference on @fence. If this is the last reference, the fence and
+ * all its sync_pts will be freed.
+ */
+void sync_fence_put(struct sync_fence *fence);
+
+/**
+ * sync_fence_install() - installs a fence into a file descriptor
+ * @fence: fence to install
+ * @fd: file descriptor in which to install the fence
+ *
+ * Installs @fence into @fd. @fd should be acquired through get_unused_fd().
+ */
+void sync_fence_install(struct sync_fence *fence, int fd);
+
+/**
+ * sync_fence_wait_async() - registers an async wait on the fence
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * Returns 1 if @fence has already signaled.
+ *
+ * Registers a callback to be called when @fence signals or has an error.
+ * @waiter should be initialized with sync_fence_waiter_init().
+ */
+int sync_fence_wait_async(struct sync_fence *fence,
+			  struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_cancel_async() - cancels an async wait
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * Returns 0 if @waiter was removed from @fence's async waiter list.
+ * Returns -ENOENT if @waiter was not found on @fence's async waiter list.
+ *
+ * Cancels a previously registered async wait. Will fail gracefully if
+ * @waiter was never registered or if @fence has already signaled @waiter.
+ */
+int sync_fence_cancel_async(struct sync_fence *fence,
+			    struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_wait() - wait on fence
+ * @fence: fence to wait on
+ * @timeout: timeout in ms
+ *
+ * Wait for @fence to be signaled or have an error.
+ * Waits indefinitely if @timeout < 0.
+ */
+int sync_fence_wait(struct sync_fence *fence, long timeout);
+
+#endif /* __KERNEL__ */
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+	__s32	fd2;		/* fd of second fence */
+	char	name[32];	/* name of new fence */
+	__s32	fence;		/* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver-dependent data
+ */
+struct sync_pt_info {
+	__u32	len;
+	char	obj_name[32];
+	char	driver_name[32];
+	__s32	status;
+	__u64	timestamp_ns;
+
+	__u8	driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len: ioctl caller writes the size of the buffer it is passing in.
+ *	ioctl returns the length of sync_fence_info_data returned to
+ *	userspace, including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+	__u32	len;
+	char	name[32];
+	__s32	status;
+
+	__u8	pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC		'>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence.
+ */
+#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
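+ *
+ * A hypothetical user-space sketch of that iteration ("data" is assumed to
+ * point at the sync_fence_info_data buffer returned by the ioctl):
+ *
+ *	struct sync_pt_info *info = (struct sync_pt_info *)data->pt_info;
+ *	__u8 *end = (__u8 *)data + data->len;
+ *
+ *	while ((__u8 *)info < end) {
+ *		printf("%s: status %d\n", info->obj_name, info->status);
+ *		info = (struct sync_pt_info *)((__u8 *)info + info->len);
+ *	}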
+ */
+#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\
+	struct sync_fence_info_data)
+
+#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
new file mode 100644
index 000000000000..95462359ba57
--- /dev/null
+++ b/drivers/staging/android/trace/sync.h
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
+#define TRACE_SYSTEM sync
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include "../sync.h"
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+	TP_PROTO(struct sync_timeline *timeline),
+
+	TP_ARGS(timeline),
+
+	TP_STRUCT__entry(
+			__string(name, timeline->name)
+			__array(char, value, 32)
+	),
+
+	TP_fast_assign(
+			__assign_str(name, timeline->name);
+			if (timeline->ops->timeline_value_str) {
+				timeline->ops->timeline_value_str(timeline,
+							__entry->value,
+							sizeof(__entry->value));
+			} else {
+				__entry->value[0] = '\0';
+			}
+	),
+
+	TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+);
+
+TRACE_EVENT(sync_wait,
+	TP_PROTO(struct sync_fence *fence, int begin),
+
+	TP_ARGS(fence, begin),
+
+	TP_STRUCT__entry(
+			__string(name, fence->name)
+			__field(s32, status)
+			__field(u32, begin)
+	),
+
+	TP_fast_assign(
+			__assign_str(name, fence->name);
+			__entry->status = fence->status;
+			__entry->begin = begin;
+	),
+
+	TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
+			__get_str(name), __entry->status)
+);
+
+TRACE_EVENT(sync_pt,
+	TP_PROTO(struct sync_pt *pt),
+
+	TP_ARGS(pt),
+
+	TP_STRUCT__entry(
+		__string(timeline, pt->parent->name)
+		__array(char, value, 32)
+	),
+
+	TP_fast_assign(
+		__assign_str(timeline, pt->parent->name);
+		if (pt->parent->ops->pt_value_str) {
+			pt->parent->ops->pt_value_str(pt, __entry->value,
+							sizeof(__entry->value));
+		} else {
+			__entry->value[0] = '\0';
+		}
+	),
+
+	TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
);
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
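For readers skimming the API this patch introduces: sync_fence_fdget(), sync_fence_wait(), sync_fence_wait_async() and sync_fence_put(), declared in sync.h above, make up the consumer side that a driver importing fences would use. The sketch below is purely illustrative: the "mydev" names, the 1000 ms timeout, and the assumption that sync_fence_fdget() returns NULL for an invalid fd are not part of the patch; only the sync_* calls themselves are taken from the header above.

	#include "sync.h"

	/* Synchronous consumer: bounded wait on a fence fd passed in from user space. */
	static int mydev_wait_on_fence_fd(int fd)
	{
		struct sync_fence *fence;
		int err;

		fence = sync_fence_fdget(fd);	/* takes a reference on the fence's file */
		if (!fence)
			return -EINVAL;		/* assumed failure convention for a bad fd */

		/* Timeout is in milliseconds; a negative value would wait indefinitely. */
		err = sync_fence_wait(fence, 1000);

		sync_fence_put(fence);		/* drop our reference */
		return err;
	}

	/* Asynchronous consumer: run mydev_fence_signaled() once the fence fires. */
	static void mydev_fence_signaled(struct sync_fence *fence,
					 struct sync_fence_waiter *waiter)
	{
		/* e.g. kick the next stage of the device's pipeline */
	}

	static struct sync_fence_waiter mydev_waiter;

	static int mydev_queue_async_wait(struct sync_fence *fence)
	{
		sync_fence_waiter_init(&mydev_waiter, mydev_fence_signaled);

		/* Returns 1 if the fence has already signaled. */
		return sync_fence_wait_async(fence, &mydev_waiter);
	}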