dlm: convert rsb list to rb_tree

Change the linked lists to rb_trees in the rsb
hash table to speed up searches.  Slow rsb searches
were having a large impact on gfs2 performance due
to the large number of dlm locks gfs2 uses.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
Author: Bob Peterson
Authored: 2011-10-26 15:24:55 -05:00
Committed by: David Teigland
Parent: c3b92c8787
Commit: 9beb3bf5a9
5 files changed, 112 insertions(+), 54 deletions(-)
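
Since the hunks below are a mechanical list-to-rb_tree conversion of the seq_file iterator, a minimal sketch of the <linux/rbtree.h> usage pattern the commit relies on may help when reading them. The struct, the compare-by-name key and the helper names here are illustrative stand-ins rather than the actual dlm code (dlm keys each bucket's "keep" tree on the resource name); only rb_entry(), rb_link_node(), rb_insert_color(), rb_first() and rb_next() are the real kernel API.

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/string.h>

/* Illustrative stand-in for struct dlm_rsb; not the real dlm layout. */
struct example_rsb {
	struct rb_node node;	/* replaces the old res_hashchain list_head */
	char name[64];
};

/* Keyed lookup: O(log n) per bucket instead of a linear list walk. */
static struct example_rsb *example_search(struct rb_root *root,
					  const char *name)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct example_rsb *r = rb_entry(n, struct example_rsb, node);
		int rc = strcmp(name, r->name);

		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			return r;
	}
	return NULL;
}

/* Insert keeps the tree ordered; the real code also handles duplicates. */
static void example_insert(struct rb_root *root, struct example_rsb *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct example_rsb *r = rb_entry(*p, struct example_rsb, node);

		parent = *p;
		if (strcmp(new->name, r->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}

/* Full in-order walk, the pattern the seq_file iterator below switches to. */
static void example_walk(struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n))
		pr_info("%s\n", rb_entry(n, struct example_rsb, node)->name);
}

A keyed search like this is what replaces the linear per-bucket list walk that was hurting gfs2 workloads with many locks; the seq_file iterator shown in the hunks below only needs the in-order rb_first()/rb_next() walk.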


@@ -393,6 +393,7 @@ static const struct seq_operations format3_seq_ops;
 static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 {
+	struct rb_node *node;
 	struct dlm_ls *ls = seq->private;
 	struct rsbtbl_iter *ri;
 	struct dlm_rsb *r;
@@ -418,9 +419,10 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 		ri->format = 3;
 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
-	if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
-		list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
-				    res_hashchain) {
+	if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+		for (node = rb_first(&ls->ls_rsbtbl[bucket].keep); node;
+		     node = rb_next(node)) {
+			r = rb_entry(node, struct dlm_rsb, res_hashnode);
 			if (!entry--) {
 				dlm_hold_rsb(r);
 				ri->rsb = r;
@@ -449,9 +451,9 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 		}
 		spin_lock(&ls->ls_rsbtbl[bucket].lock);
-		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
-			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
-					     struct dlm_rsb, res_hashchain);
+		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+			node = rb_first(&ls->ls_rsbtbl[bucket].keep);
+			r = rb_entry(node, struct dlm_rsb, res_hashnode);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;
@@ -467,7 +469,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 {
 	struct dlm_ls *ls = seq->private;
 	struct rsbtbl_iter *ri = iter_ptr;
-	struct list_head *next;
+	struct rb_node *next;
 	struct dlm_rsb *r, *rp;
 	loff_t n = *pos;
 	unsigned bucket;
@@ -480,10 +482,10 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	rp = ri->rsb;
-	next = rp->res_hashchain.next;
+	next = rb_next(&rp->res_hashnode);
 
-	if (next != &ls->ls_rsbtbl[bucket].list) {
-		r = list_entry(next, struct dlm_rsb, res_hashchain);
+	if (next) {
+		r = rb_entry(next, struct dlm_rsb, res_hashnode);
 		dlm_hold_rsb(r);
 		ri->rsb = r;
 		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
@@ -511,9 +513,9 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 		}
 		spin_lock(&ls->ls_rsbtbl[bucket].lock);
-		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
-			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
-					     struct dlm_rsb, res_hashchain);
+		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+			next = rb_first(&ls->ls_rsbtbl[bucket].keep);
+			r = rb_entry(next, struct dlm_rsb, res_hashnode);
 			dlm_hold_rsb(r);
 			ri->rsb = r;
 			ri->bucket = bucket;