author    Linus Torvalds <torvalds@linux-foundation.org>  2024-05-15 03:29:25 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-05-15 03:29:25 +0300
commit    6fffab6676853d52cfdbb030365354252a66a20a (patch)
tree      7153c4cb8956e0fb7a04fd8a6e6602e96ec85b4d /include
parent    a3d1f54d7aa4c3be2c6a10768d4ffa1dcb620da9 (diff)
parent    7b72ab2c6a468305449db8f204bf1e406fd3e147 (diff)
download  linux-6fffab6676853d52cfdbb030365354252a66a20a.tar.xz
Merge tag 'dlm-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm
Pull dlm updates from David Teigland:
 "This set includes some small fixes, and some big internal changes:

  - Fix a long standing race between the unlock callback for the last
    lkb struct, and removing the rsb that became unused after the final
    unlock. This could lead different nodes to inconsistent info about
    the rsb master node.

  - Remove unnecessary refcounting on callback structs, returning to
    the way things were done in the past.

  - Do message processing in softirq context. This allows dlm messages
    to be cleared more quickly and efficiently, reducing long lists of
    incomplete requests. A future change to run callbacks directly from
    this context will make this more effective.

  - The softirq message processing involved a number of patches
    changing mutexes to spinlocks and rwlocks, and a fair amount of
    code re-org in preparation.

  - Use an rhashtable for rsb structs, rather than our old internal
    hash table implementation. This also required some re-org of lists
    and locks in preparation for the change.

  - Drop the dlm_scand kthread, and use timers to clear unused rsb
    structs. Scanning all rsb's periodically was a lot of wasted work.

  - Fix recent regression in logic for copying LVB data in user space
    lock requests"

* tag 'dlm-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm: (34 commits)
  dlm: return -ENOMEM if ls_recover_buf fails
  dlm: fix sleep in atomic context
  dlm: use rwlock for lkbidr
  dlm: use rwlock for rsb hash table
  dlm: drop dlm_scand kthread and use timers
  dlm: do not use ref counts for rsb in the toss state
  dlm: switch to use rhashtable for rsbs
  dlm: add rsb lists for iteration
  dlm: merge toss and keep hash table lists into one list
  dlm: change to single hashtable lock
  dlm: increment ls_count for dlm_scand
  dlm: do message processing in softirq context
  dlm: use spin_lock_bh for message processing
  dlm: remove schedule in receive path
  dlm: convert ls_recv_active from rw_semaphore to rwlock
  dlm: avoid blocking receive at the end of recovery
  dlm: convert res_lock to spinlock
  dlm: convert ls_waiters_mutex to spinlock
  dlm: drop mutex use in waiters recovery
  dlm: add new struct to save position in dlm_copy_master_names
  ...
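Of the items above, the rhashtable conversion is the largest structural change: the kernel's generic, resizable rhashtable replaces dlm's own fixed-size rsb hash table. The sketch below is only a minimal illustration of the generic <linux/rhashtable.h> API the series switches to; it is not the dlm code itself, and struct example_rsb, its u32 id key, and the function names are all hypothetical (the real rsb table is keyed by the variable-length resource name with custom hash/compare callbacks).

  #include <linux/rhashtable.h>

  /* Hypothetical object stored in the table; dlm's real struct dlm_rsb is
   * keyed by its variable-length resource name, not a fixed u32. */
  struct example_rsb {
          u32 id;                  /* lookup key */
          struct rhash_head node;  /* linkage owned by the rhashtable */
  };

  static const struct rhashtable_params example_rsb_params = {
          .head_offset = offsetof(struct example_rsb, node),
          .key_offset  = offsetof(struct example_rsb, id),
          .key_len     = sizeof(u32),
          .automatic_shrinking = true,  /* table resizes in both directions */
  };

  static struct rhashtable example_rsb_table;

  static int example_table_init(void)
  {
          return rhashtable_init(&example_rsb_table, &example_rsb_params);
  }

  /* Returns 0 on success, -EEXIST if an entry with the same key exists. */
  static int example_rsb_insert(struct example_rsb *r)
  {
          return rhashtable_lookup_insert_fast(&example_rsb_table, &r->node,
                                               example_rsb_params);
  }

  /* The lookup takes the RCU read lock internally; the caller still needs
   * its own reference or lock before using the returned object. */
  static struct example_rsb *example_rsb_lookup(u32 id)
  {
          return rhashtable_lookup_fast(&example_rsb_table, &id,
                                        example_rsb_params);
  }

  static int example_rsb_remove(struct example_rsb *r)
  {
          return rhashtable_remove_fast(&example_rsb_table, &r->node,
                                        example_rsb_params);
  }

The params struct is passed by value to the _fast helpers so the hashing can be inlined for a constant key length; resizing happens in the background, which removes the need for a fixed bucket count sized up front.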
Diffstat (limited to 'include')
-rw-r--r--  include/trace/events/dlm.h  46
1 file changed, 19 insertions(+), 27 deletions(-)
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index c1a146f9fc91..af160082c9e3 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -189,29 +189,25 @@ TRACE_EVENT(dlm_lock_end,
TRACE_EVENT(dlm_bast,
- TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode),
+ TP_PROTO(__u32 ls_id, __u32 lkb_id, int mode,
+ const char *res_name, size_t res_length),
- TP_ARGS(ls, lkb, mode),
+ TP_ARGS(ls_id, lkb_id, mode, res_name, res_length),
TP_STRUCT__entry(
__field(__u32, ls_id)
__field(__u32, lkb_id)
__field(int, mode)
- __dynamic_array(unsigned char, res_name,
- lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ __dynamic_array(unsigned char, res_name, res_length)
),
TP_fast_assign(
- struct dlm_rsb *r;
-
- __entry->ls_id = ls->ls_global_id;
- __entry->lkb_id = lkb->lkb_id;
+ __entry->ls_id = ls_id;
+ __entry->lkb_id = lkb_id;
__entry->mode = mode;
- r = lkb->lkb_resource;
- if (r)
- memcpy(__get_dynamic_array(res_name), r->res_name,
- __get_dynamic_array_len(res_name));
+ memcpy(__get_dynamic_array(res_name), res_name,
+ __get_dynamic_array_len(res_name));
),
TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s",
@@ -224,31 +220,27 @@ TRACE_EVENT(dlm_bast,
TRACE_EVENT(dlm_ast,
- TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb),
+ TP_PROTO(__u32 ls_id, __u32 lkb_id, __u8 sb_flags, int sb_status,
+ const char *res_name, size_t res_length),
- TP_ARGS(ls, lkb),
+ TP_ARGS(ls_id, lkb_id, sb_flags, sb_status, res_name, res_length),
TP_STRUCT__entry(
__field(__u32, ls_id)
__field(__u32, lkb_id)
- __field(u8, sb_flags)
+ __field(__u8, sb_flags)
__field(int, sb_status)
- __dynamic_array(unsigned char, res_name,
- lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+ __dynamic_array(unsigned char, res_name, res_length)
),
TP_fast_assign(
- struct dlm_rsb *r;
-
- __entry->ls_id = ls->ls_global_id;
- __entry->lkb_id = lkb->lkb_id;
- __entry->sb_flags = lkb->lkb_lksb->sb_flags;
- __entry->sb_status = lkb->lkb_lksb->sb_status;
+ __entry->ls_id = ls_id;
+ __entry->lkb_id = lkb_id;
+ __entry->sb_flags = sb_flags;
+ __entry->sb_status = sb_status;
- r = lkb->lkb_resource;
- if (r)
- memcpy(__get_dynamic_array(res_name), r->res_name,
- __get_dynamic_array_len(res_name));
+ memcpy(__get_dynamic_array(res_name), res_name,
+ __get_dynamic_array_len(res_name));
),
TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s",