Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client

Pull Ceph updates from Sage Weil:
 "This changeset has a few main parts:

   - Ilya has finished a huge refactoring effort to sync up the
     client-side logic in libceph with the user-space client code, which
     has evolved significantly over the last couple of years and gained
     lots of additional behaviors (e.g., how requests are handled when
     the cluster is full and transitions from full to non-full).

     The structure of the code is now much more closely aligned with
     userspace, which will make it much easier to maintain going forward
     as behavior changes take place.  There are some locking
     improvements bundled in as well.

   - Zheng has added multi-filesystem support (multiple namespaces
     within the same Ceph cluster)

   - Zheng has changed the readdir offsets and directory enumeration so
     that dentry offsets are hash-based and therefore stable across
     directory fragmentation events on the MDS (a conceptual sketch
     follows this message)

   - Zheng has a smorgasbord of bug fixes across fs/ceph"
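
A conceptual sketch of the hash-based offsets from the readdir change above, assuming the offset packs a name hash in its high bits and a collision index in its low bits; the helper below is illustrative, not the actual fs/ceph code:

/* Illustrative only: pack a stable readdir position.  Because the
 * high bits depend only on a hash of the dentry name, the position
 * survives directory fragmentation events on the MDS. */
static u64 make_dentry_offset(u32 name_hash, u32 dup_index)
{
	/* high 32 bits: hash of the name; low 32 bits: index among
	 * entries that share the same hash */
	return ((u64)name_hash << 32) | dup_index;
}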

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client: (71 commits)
  ceph: fix wake_up_session_cb()
  ceph: don't use truncate_pagecache() to invalidate read cache
  ceph: SetPageError() for writeback pages if writepages fails
  ceph: handle interrupted ceph_writepage()
  ceph: make ceph_update_writeable_page() uninterruptible
  libceph: make ceph_osdc_wait_request() uninterruptible
  ceph: handle -EAGAIN returned by ceph_update_writeable_page()
  ceph: make fault/page_mkwrite return VM_FAULT_OOM for -ENOMEM
  ceph: block non-fatal signals for fault/page_mkwrite
  ceph: make logical calculation functions return bool
  ceph: tolerate bad i_size for symlink inode
  ceph: improve fragtree change detection
  ceph: keep leaf frag when updating fragtree
  ceph: fix dir_auth check in ceph_fill_dirfrag()
  ceph: don't assume frag tree splits in mds reply are sorted
  ceph: fix inode reference leak
  ceph: using hash value to compose dentry offset
  ceph: don't forbid marking directory complete after forward seek
  ceph: record 'offset' for each entry of readdir result
  ceph: define 'end/complete' in readdir reply as bit flags
  ...
Committed by Linus Torvalds on 2016-05-26 14:10:32 -07:00
29 changed files with 4827 additions and 2577 deletions

@@ -651,7 +651,7 @@ EXPORT_SYMBOL(ceph_destroy_client);
/*
* true if we have the mon map (and have thus joined the cluster)
*/
static int have_mon_and_osd_map(struct ceph_client *client)
static bool have_mon_and_osd_map(struct ceph_client *client)
{
return client->monc.monmap && client->monc.monmap->epoch &&
client->osdc.osdmap && client->osdc.osdmap->epoch;

@@ -27,6 +27,22 @@ __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
}
}
const char *ceph_osd_watch_op_name(int o)
{
switch (o) {
case CEPH_OSD_WATCH_OP_UNWATCH:
return "unwatch";
case CEPH_OSD_WATCH_OP_WATCH:
return "watch";
case CEPH_OSD_WATCH_OP_RECONNECT:
return "reconnect";
case CEPH_OSD_WATCH_OP_PING:
return "ping";
default:
return "???";
}
}
const char *ceph_osd_state_name(int s)
{
switch (s) {

@@ -54,24 +54,25 @@ static int osdmap_show(struct seq_file *s, void *p)
{
int i;
struct ceph_client *client = s->private;
struct ceph_osdmap *map = client->osdc.osdmap;
struct ceph_osd_client *osdc = &client->osdc;
struct ceph_osdmap *map = osdc->osdmap;
struct rb_node *n;
if (map == NULL)
return 0;
seq_printf(s, "epoch %d\n", map->epoch);
seq_printf(s, "flags%s%s\n",
(map->flags & CEPH_OSDMAP_NEARFULL) ? " NEARFULL" : "",
(map->flags & CEPH_OSDMAP_FULL) ? " FULL" : "");
down_read(&osdc->lock);
seq_printf(s, "epoch %d flags 0x%x\n", map->epoch, map->flags);
for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pool =
struct ceph_pg_pool_info *pi =
rb_entry(n, struct ceph_pg_pool_info, node);
seq_printf(s, "pool %lld pg_num %u (%d) read_tier %lld write_tier %lld\n",
pool->id, pool->pg_num, pool->pg_num_mask,
pool->read_tier, pool->write_tier);
seq_printf(s, "pool %lld '%s' type %d size %d min_size %d pg_num %u pg_num_mask %d flags 0x%llx lfor %u read_tier %lld write_tier %lld\n",
pi->id, pi->name, pi->type, pi->size, pi->min_size,
pi->pg_num, pi->pg_num_mask, pi->flags,
pi->last_force_request_resend, pi->read_tier,
pi->write_tier);
}
for (i = 0; i < map->max_osd; i++) {
struct ceph_entity_addr *addr = &map->osd_addr[i];
@@ -103,6 +104,7 @@ static int osdmap_show(struct seq_file *s, void *p)
pg->pgid.seed, pg->primary_temp.osd);
}
up_read(&osdc->lock);
return 0;
}
@@ -126,6 +128,7 @@ static int monc_show(struct seq_file *s, void *p)
CEPH_SUBSCRIBE_ONETIME ? "" : "+"));
seq_putc(s, '\n');
}
seq_printf(s, "fs_cluster_id %d\n", monc->fs_cluster_id);
for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
__u16 op;
@@ -143,43 +146,113 @@ static int monc_show(struct seq_file *s, void *p)
return 0;
}
static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t)
{
int i;
seq_printf(s, "osd%d\t%llu.%x\t[", t->osd, t->pgid.pool, t->pgid.seed);
for (i = 0; i < t->up.size; i++)
seq_printf(s, "%s%d", (!i ? "" : ","), t->up.osds[i]);
seq_printf(s, "]/%d\t[", t->up.primary);
for (i = 0; i < t->acting.size; i++)
seq_printf(s, "%s%d", (!i ? "" : ","), t->acting.osds[i]);
seq_printf(s, "]/%d\t%*pE\t0x%x", t->acting.primary,
t->target_oid.name_len, t->target_oid.name, t->flags);
if (t->paused)
seq_puts(s, "\tP");
}
static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
{
int i;
seq_printf(s, "%llu\t", req->r_tid);
dump_target(s, &req->r_t);
seq_printf(s, "\t%d\t%u'%llu", req->r_attempts,
le32_to_cpu(req->r_replay_version.epoch),
le64_to_cpu(req->r_replay_version.version));
for (i = 0; i < req->r_num_ops; i++) {
struct ceph_osd_req_op *op = &req->r_ops[i];
seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
ceph_osd_op_name(op->op));
if (op->op == CEPH_OSD_OP_WATCH)
seq_printf(s, "-%s",
ceph_osd_watch_op_name(op->watch.op));
}
seq_putc(s, '\n');
}
static void dump_requests(struct seq_file *s, struct ceph_osd *osd)
{
struct rb_node *n;
mutex_lock(&osd->lock);
for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
struct ceph_osd_request *req =
rb_entry(n, struct ceph_osd_request, r_node);
dump_request(s, req);
}
mutex_unlock(&osd->lock);
}
static void dump_linger_request(struct seq_file *s,
struct ceph_osd_linger_request *lreq)
{
seq_printf(s, "%llu\t", lreq->linger_id);
dump_target(s, &lreq->t);
seq_printf(s, "\t%u\t%s%s/%d\n", lreq->register_gen,
lreq->is_watch ? "W" : "N", lreq->committed ? "C" : "",
lreq->last_error);
}
static void dump_linger_requests(struct seq_file *s, struct ceph_osd *osd)
{
struct rb_node *n;
mutex_lock(&osd->lock);
for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
struct ceph_osd_linger_request *lreq =
rb_entry(n, struct ceph_osd_linger_request, node);
dump_linger_request(s, lreq);
}
mutex_unlock(&osd->lock);
}
static int osdc_show(struct seq_file *s, void *pp)
{
struct ceph_client *client = s->private;
struct ceph_osd_client *osdc = &client->osdc;
struct rb_node *p;
struct rb_node *n;
mutex_lock(&osdc->request_mutex);
for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
struct ceph_osd_request *req;
unsigned int i;
int opcode;
down_read(&osdc->lock);
seq_printf(s, "REQUESTS %d homeless %d\n",
atomic_read(&osdc->num_requests),
atomic_read(&osdc->num_homeless));
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
req = rb_entry(p, struct ceph_osd_request, r_node);
seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid,
req->r_osd ? req->r_osd->o_osd : -1,
req->r_pgid.pool, req->r_pgid.seed);
seq_printf(s, "%.*s", req->r_base_oid.name_len,
req->r_base_oid.name);
if (req->r_reassert_version.epoch)
seq_printf(s, "\t%u'%llu",
(unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
le64_to_cpu(req->r_reassert_version.version));
else
seq_printf(s, "\t");
for (i = 0; i < req->r_num_ops; i++) {
opcode = req->r_ops[i].op;
seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
ceph_osd_op_name(opcode));
}
seq_printf(s, "\n");
dump_requests(s, osd);
}
mutex_unlock(&osdc->request_mutex);
dump_requests(s, &osdc->homeless_osd);
seq_puts(s, "LINGER REQUESTS\n");
for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
dump_linger_requests(s, osd);
}
dump_linger_requests(s, &osdc->homeless_osd);
up_read(&osdc->lock);
return 0;
}

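With the rework above, osdc_show() prints one tab-separated line per in-flight request: tid, then the target (osd, pgid, up set/primary, acting set/primary, oid, flags), then attempts, replay version, and the ops. Reading off the seq_printf() format strings, output would look roughly like the following (all values made up for illustration):

REQUESTS 2 homeless 0
123	osd1	2.3a	[1,4,7]/1	[1,4,7]/1	obj_foo	0x24	1	27'42	write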
@@ -260,20 +260,26 @@ static void __send_subscribe(struct ceph_mon_client *monc)
BUG_ON(num < 1); /* monmap sub is always there */
ceph_encode_32(&p, num);
for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
const char *s = ceph_sub_str[i];
char buf[32];
int len;
if (!monc->subs[i].want)
continue;
dout("%s %s start %llu flags 0x%x\n", __func__, s,
len = sprintf(buf, "%s", ceph_sub_str[i]);
if (i == CEPH_SUB_MDSMAP &&
monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
len += sprintf(buf + len, ".%d", monc->fs_cluster_id);
dout("%s %s start %llu flags 0x%x\n", __func__, buf,
le64_to_cpu(monc->subs[i].item.start),
monc->subs[i].item.flags);
ceph_encode_string(&p, end, s, strlen(s));
ceph_encode_string(&p, end, buf, len);
memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
p += sizeof(monc->subs[i].item);
}
BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19));
BUG_ON(p > end);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
ceph_msg_revoke(msg);
@@ -376,19 +382,13 @@ void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
}
EXPORT_SYMBOL(ceph_monc_got_map);
/*
* Register interest in the next osdmap
*/
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
void ceph_monc_renew_subs(struct ceph_mon_client *monc)
{
dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have);
mutex_lock(&monc->mutex);
if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
monc->subs[CEPH_SUB_OSDMAP].have + 1, false))
__send_subscribe(monc);
__send_subscribe(monc);
mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_request_next_osdmap);
EXPORT_SYMBOL(ceph_monc_renew_subs);
/*
* Wait for an osdmap with a given epoch.
@@ -478,51 +478,17 @@ out:
/*
* generic requests (currently statfs, mon_get_version)
*/
static struct ceph_mon_generic_request *__lookup_generic_req(
struct ceph_mon_client *monc, u64 tid)
{
struct ceph_mon_generic_request *req;
struct rb_node *n = monc->generic_request_tree.rb_node;
while (n) {
req = rb_entry(n, struct ceph_mon_generic_request, node);
if (tid < req->tid)
n = n->rb_left;
else if (tid > req->tid)
n = n->rb_right;
else
return req;
}
return NULL;
}
static void __insert_generic_request(struct ceph_mon_client *monc,
struct ceph_mon_generic_request *new)
{
struct rb_node **p = &monc->generic_request_tree.rb_node;
struct rb_node *parent = NULL;
struct ceph_mon_generic_request *req = NULL;
while (*p) {
parent = *p;
req = rb_entry(parent, struct ceph_mon_generic_request, node);
if (new->tid < req->tid)
p = &(*p)->rb_left;
else if (new->tid > req->tid)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new->node, parent, p);
rb_insert_color(&new->node, &monc->generic_request_tree);
}
DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)
static void release_generic_request(struct kref *kref)
{
struct ceph_mon_generic_request *req =
container_of(kref, struct ceph_mon_generic_request, kref);
dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
req->reply);
WARN_ON(!RB_EMPTY_NODE(&req->node));
if (req->reply)
ceph_msg_put(req->reply);
if (req->request)
@@ -533,7 +499,8 @@ static void release_generic_request(struct kref *kref)
static void put_generic_request(struct ceph_mon_generic_request *req)
{
kref_put(&req->kref, release_generic_request);
if (req)
kref_put(&req->kref, release_generic_request);
}
static void get_generic_request(struct ceph_mon_generic_request *req)
@@ -541,6 +508,103 @@ static void get_generic_request(struct ceph_mon_generic_request *req)
kref_get(&req->kref);
}
static struct ceph_mon_generic_request *
alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
{
struct ceph_mon_generic_request *req;
req = kzalloc(sizeof(*req), gfp);
if (!req)
return NULL;
req->monc = monc;
kref_init(&req->kref);
RB_CLEAR_NODE(&req->node);
init_completion(&req->completion);
dout("%s greq %p\n", __func__, req);
return req;
}
static void register_generic_request(struct ceph_mon_generic_request *req)
{
struct ceph_mon_client *monc = req->monc;
WARN_ON(req->tid);
get_generic_request(req);
req->tid = ++monc->last_tid;
insert_generic_request(&monc->generic_request_tree, req);
}
static void send_generic_request(struct ceph_mon_client *monc,
struct ceph_mon_generic_request *req)
{
WARN_ON(!req->tid);
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
req->request->hdr.tid = cpu_to_le64(req->tid);
ceph_con_send(&monc->con, ceph_msg_get(req->request));
}
static void __finish_generic_request(struct ceph_mon_generic_request *req)
{
struct ceph_mon_client *monc = req->monc;
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
erase_generic_request(&monc->generic_request_tree, req);
ceph_msg_revoke(req->request);
ceph_msg_revoke_incoming(req->reply);
}
static void finish_generic_request(struct ceph_mon_generic_request *req)
{
__finish_generic_request(req);
put_generic_request(req);
}
static void complete_generic_request(struct ceph_mon_generic_request *req)
{
if (req->complete_cb)
req->complete_cb(req);
else
complete_all(&req->completion);
put_generic_request(req);
}
void cancel_generic_request(struct ceph_mon_generic_request *req)
{
struct ceph_mon_client *monc = req->monc;
struct ceph_mon_generic_request *lookup_req;
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
mutex_lock(&monc->mutex);
lookup_req = lookup_generic_request(&monc->generic_request_tree,
req->tid);
if (lookup_req) {
WARN_ON(lookup_req != req);
finish_generic_request(req);
}
mutex_unlock(&monc->mutex);
}
static int wait_generic_request(struct ceph_mon_generic_request *req)
{
int ret;
dout("%s greq %p tid %llu\n", __func__, req, req->tid);
ret = wait_for_completion_interruptible(&req->completion);
if (ret)
cancel_generic_request(req);
else
ret = req->result; /* completed */
return ret;
}
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
@@ -551,7 +615,7 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
struct ceph_msg *m;
mutex_lock(&monc->mutex);
req = __lookup_generic_req(monc, tid);
req = lookup_generic_request(&monc->generic_request_tree, tid);
if (!req) {
dout("get_generic_reply %lld dne\n", tid);
*skip = 1;
@@ -570,42 +634,6 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
return m;
}
static int __do_generic_request(struct ceph_mon_client *monc, u64 tid,
struct ceph_mon_generic_request *req)
{
int err;
/* register request */
req->tid = tid != 0 ? tid : ++monc->last_tid;
req->request->hdr.tid = cpu_to_le64(req->tid);
__insert_generic_request(monc, req);
monc->num_generic_requests++;
ceph_con_send(&monc->con, ceph_msg_get(req->request));
mutex_unlock(&monc->mutex);
err = wait_for_completion_interruptible(&req->completion);
mutex_lock(&monc->mutex);
rb_erase(&req->node, &monc->generic_request_tree);
monc->num_generic_requests--;
if (!err)
err = req->result;
return err;
}
static int do_generic_request(struct ceph_mon_client *monc,
struct ceph_mon_generic_request *req)
{
int err;
mutex_lock(&monc->mutex);
err = __do_generic_request(monc, 0, req);
mutex_unlock(&monc->mutex);
return err;
}
/*
* statfs
*/
@@ -616,22 +644,24 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
u64 tid = le64_to_cpu(msg->hdr.tid);
dout("%s msg %p tid %llu\n", __func__, msg, tid);
if (msg->front.iov_len != sizeof(*reply))
goto bad;
dout("handle_statfs_reply %p tid %llu\n", msg, tid);
mutex_lock(&monc->mutex);
req = __lookup_generic_req(monc, tid);
if (req) {
*(struct ceph_statfs *)req->buf = reply->st;
req->result = 0;
get_generic_request(req);
req = lookup_generic_request(&monc->generic_request_tree, tid);
if (!req) {
mutex_unlock(&monc->mutex);
return;
}
req->result = 0;
*req->u.st = reply->st; /* struct */
__finish_generic_request(req);
mutex_unlock(&monc->mutex);
if (req) {
complete_all(&req->completion);
put_generic_request(req);
}
complete_generic_request(req);
return;
bad:
@@ -646,38 +676,38 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
struct ceph_mon_generic_request *req;
struct ceph_mon_statfs *h;
int err;
int ret = -ENOMEM;
req = kzalloc(sizeof(*req), GFP_NOFS);
req = alloc_generic_request(monc, GFP_NOFS);
if (!req)
return -ENOMEM;
goto out;
kref_init(&req->kref);
req->buf = buf;
init_completion(&req->completion);
err = -ENOMEM;
req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
true);
if (!req->request)
goto out;
req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
true);
req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
if (!req->reply)
goto out;
req->u.st = buf;
mutex_lock(&monc->mutex);
register_generic_request(req);
/* fill out request */
h = req->request->front.iov_base;
h->monhdr.have_version = 0;
h->monhdr.session_mon = cpu_to_le16(-1);
h->monhdr.session_mon_tid = 0;
h->fsid = monc->monmap->fsid;
send_generic_request(monc, req);
mutex_unlock(&monc->mutex);
err = do_generic_request(monc, req);
ret = wait_generic_request(req);
out:
put_generic_request(req);
return err;
return ret;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);
@@ -690,7 +720,7 @@ static void handle_get_version_reply(struct ceph_mon_client *monc,
void *end = p + msg->front_alloc_len;
u64 handle;
dout("%s %p tid %llu\n", __func__, msg, tid);
dout("%s msg %p tid %llu\n", __func__, msg, tid);
ceph_decode_need(&p, end, 2*sizeof(u64), bad);
handle = ceph_decode_64(&p);
@@ -698,77 +728,111 @@ static void handle_get_version_reply(struct ceph_mon_client *monc,
goto bad;
mutex_lock(&monc->mutex);
req = __lookup_generic_req(monc, handle);
if (req) {
*(u64 *)req->buf = ceph_decode_64(&p);
req->result = 0;
get_generic_request(req);
}
mutex_unlock(&monc->mutex);
if (req) {
complete_all(&req->completion);
put_generic_request(req);
req = lookup_generic_request(&monc->generic_request_tree, handle);
if (!req) {
mutex_unlock(&monc->mutex);
return;
}
req->result = 0;
req->u.newest = ceph_decode_64(&p);
__finish_generic_request(req);
mutex_unlock(&monc->mutex);
complete_generic_request(req);
return;
bad:
pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
ceph_msg_dump(msg);
}
static struct ceph_mon_generic_request *
__ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data)
{
struct ceph_mon_generic_request *req;
req = alloc_generic_request(monc, GFP_NOIO);
if (!req)
goto err_put_req;
req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
sizeof(u64) + sizeof(u32) + strlen(what),
GFP_NOIO, true);
if (!req->request)
goto err_put_req;
req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
true);
if (!req->reply)
goto err_put_req;
req->complete_cb = cb;
req->private_data = private_data;
mutex_lock(&monc->mutex);
register_generic_request(req);
{
void *p = req->request->front.iov_base;
void *const end = p + req->request->front_alloc_len;
ceph_encode_64(&p, req->tid); /* handle */
ceph_encode_string(&p, end, what, strlen(what));
WARN_ON(p != end);
}
send_generic_request(monc, req);
mutex_unlock(&monc->mutex);
return req;
err_put_req:
put_generic_request(req);
return ERR_PTR(-ENOMEM);
}
/*
* Send MMonGetVersion and wait for the reply.
*
* @what: one of "mdsmap", "osdmap" or "monmap"
*/
int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
u64 *newest)
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
u64 *newest)
{
struct ceph_mon_generic_request *req;
void *p, *end;
u64 tid;
int err;
int ret;
req = kzalloc(sizeof(*req), GFP_NOFS);
if (!req)
return -ENOMEM;
req = __ceph_monc_get_version(monc, what, NULL, 0);
if (IS_ERR(req))
return PTR_ERR(req);
kref_init(&req->kref);
req->buf = newest;
init_completion(&req->completion);
ret = wait_generic_request(req);
if (!ret)
*newest = req->u.newest;
req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
sizeof(u64) + sizeof(u32) + strlen(what),
GFP_NOFS, true);
if (!req->request) {
err = -ENOMEM;
goto out;
}
req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024,
GFP_NOFS, true);
if (!req->reply) {
err = -ENOMEM;
goto out;
}
p = req->request->front.iov_base;
end = p + req->request->front_alloc_len;
/* fill out request */
mutex_lock(&monc->mutex);
tid = ++monc->last_tid;
ceph_encode_64(&p, tid); /* handle */
ceph_encode_string(&p, end, what, strlen(what));
err = __do_generic_request(monc, tid, req);
mutex_unlock(&monc->mutex);
out:
put_generic_request(req);
return err;
return ret;
}
EXPORT_SYMBOL(ceph_monc_do_get_version);
EXPORT_SYMBOL(ceph_monc_get_version);
/*
* Send MMonGetVersion,
*
* @what: one of "mdsmap", "osdmap" or "monmap"
*/
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data)
{
struct ceph_mon_generic_request *req;
req = __ceph_monc_get_version(monc, what, cb, private_data);
if (IS_ERR(req))
return PTR_ERR(req);
put_generic_request(req);
return 0;
}
EXPORT_SYMBOL(ceph_monc_get_version_async);
/*
* Resend pending generic requests.
@@ -890,7 +954,7 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
if (!monc->m_subscribe_ack)
goto out_auth;
monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128, GFP_NOFS,
true);
if (!monc->m_subscribe)
goto out_subscribe_ack;
@@ -914,9 +978,10 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
monc->generic_request_tree = RB_ROOT;
monc->num_generic_requests = 0;
monc->last_tid = 0;
monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;
return 0;
out_auth_reply:
@@ -954,6 +1019,8 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
ceph_auth_destroy(monc->auth);
WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));
ceph_msg_put(monc->m_auth);
ceph_msg_put(monc->m_auth_reply);
ceph_msg_put(monc->m_subscribe);

(File diff suppressed because it is too large.)

@@ -380,23 +380,24 @@ bad:
return ERR_PTR(err);
}
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
if (lhs->pool < rhs->pool)
return -1;
if (lhs->pool > rhs->pool)
return 1;
if (lhs->seed < rhs->seed)
return -1;
if (lhs->seed > rhs->seed)
return 1;
return 0;
}
/*
* rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
* to a set of osds) and primary_temp (explicit primary setting)
*/
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
if (l.pool < r.pool)
return -1;
if (l.pool > r.pool)
return 1;
if (l.seed < r.seed)
return -1;
if (l.seed > r.seed)
return 1;
return 0;
}
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
struct rb_root *root)
{
@@ -409,7 +410,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new,
while (*p) {
parent = *p;
pg = rb_entry(parent, struct ceph_pg_mapping, node);
c = pgid_cmp(new->pgid, pg->pgid);
c = ceph_pg_compare(&new->pgid, &pg->pgid);
if (c < 0)
p = &(*p)->rb_left;
else if (c > 0)
@@ -432,7 +433,7 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
while (n) {
pg = rb_entry(n, struct ceph_pg_mapping, node);
c = pgid_cmp(pgid, pg->pgid);
c = ceph_pg_compare(&pgid, &pg->pgid);
if (c < 0) {
n = n->rb_left;
} else if (c > 0) {
@@ -596,7 +597,9 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
*p += 4; /* skip crash_replay_interval */
if (ev >= 7)
*p += 1; /* skip min_size */
pi->min_size = ceph_decode_8(p);
else
pi->min_size = pi->size - pi->size / 2;
if (ev >= 8)
*p += 8 + 8; /* skip quota_max_* */
@@ -616,6 +619,50 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
pi->write_tier = -1;
}
if (ev >= 10) {
/* skip properties */
num = ceph_decode_32(p);
while (num--) {
len = ceph_decode_32(p);
*p += len; /* key */
len = ceph_decode_32(p);
*p += len; /* val */
}
}
if (ev >= 11) {
/* skip hit_set_params */
*p += 1 + 1; /* versions */
len = ceph_decode_32(p);
*p += len;
*p += 4; /* skip hit_set_period */
*p += 4; /* skip hit_set_count */
}
if (ev >= 12)
*p += 4; /* skip stripe_width */
if (ev >= 13) {
*p += 8; /* skip target_max_bytes */
*p += 8; /* skip target_max_objects */
*p += 4; /* skip cache_target_dirty_ratio_micro */
*p += 4; /* skip cache_target_full_ratio_micro */
*p += 4; /* skip cache_min_flush_age */
*p += 4; /* skip cache_min_evict_age */
}
if (ev >= 14) {
/* skip erasure_code_profile */
len = ceph_decode_32(p);
*p += len;
}
if (ev >= 15)
pi->last_force_request_resend = ceph_decode_32(p);
else
pi->last_force_request_resend = 0;
/* ignore the rest */
*p = pool_end;
@@ -660,6 +707,23 @@ bad:
/*
* osd map
*/
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
struct ceph_osdmap *map;
map = kzalloc(sizeof(*map), GFP_NOIO);
if (!map)
return NULL;
map->pg_pools = RB_ROOT;
map->pool_max = -1;
map->pg_temp = RB_ROOT;
map->primary_temp = RB_ROOT;
mutex_init(&map->crush_scratch_mutex);
return map;
}
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
dout("osdmap_destroy %p\n", map);
@@ -1183,14 +1247,10 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
struct ceph_osdmap *map;
int ret;
map = kzalloc(sizeof(*map), GFP_NOFS);
map = ceph_osdmap_alloc();
if (!map)
return ERR_PTR(-ENOMEM);
map->pg_temp = RB_ROOT;
map->primary_temp = RB_ROOT;
mutex_init(&map->crush_scratch_mutex);
ret = osdmap_decode(p, end, map);
if (ret) {
ceph_osdmap_destroy(map);
@@ -1204,8 +1264,7 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
* decode and apply an incremental map update.
*/
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
struct ceph_osdmap *map,
struct ceph_messenger *msgr)
struct ceph_osdmap *map)
{
struct crush_map *newcrush = NULL;
struct ceph_fsid fsid;
@@ -1381,8 +1440,252 @@ bad:
return ERR_PTR(err);
}
void ceph_oid_copy(struct ceph_object_id *dest,
const struct ceph_object_id *src)
{
WARN_ON(!ceph_oid_empty(dest));
if (src->name != src->inline_name) {
/* very rare, see ceph_object_id definition */
dest->name = kmalloc(src->name_len + 1,
GFP_NOIO | __GFP_NOFAIL);
}
memcpy(dest->name, src->name, src->name_len + 1);
dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);
static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
int len;
WARN_ON(!ceph_oid_empty(oid));
len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
if (len >= sizeof(oid->inline_name))
return len;
oid->name_len = len;
return 0;
}
/*
* If oid doesn't fit into inline buffer, BUG.
*/
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
BUG_ON(oid_printf_vargs(oid, fmt, ap));
va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);
static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, va_list ap)
{
va_list aq;
int len;
va_copy(aq, ap);
len = oid_printf_vargs(oid, fmt, aq);
va_end(aq);
if (len) {
char *external_name;
external_name = kmalloc(len + 1, gfp);
if (!external_name)
return -ENOMEM;
oid->name = external_name;
WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
oid->name_len = len;
}
return 0;
}
/*
* If oid doesn't fit into inline buffer, allocate.
*/
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
va_end(ap);
return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);
void ceph_oid_destroy(struct ceph_object_id *oid)
{
if (oid->name != oid->inline_name)
kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);
/*
* osds only
*/
static bool __osds_equal(const struct ceph_osds *lhs,
const struct ceph_osds *rhs)
{
if (lhs->size == rhs->size &&
!memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
return true;
return false;
}
/*
* osds + primary
*/
static bool osds_equal(const struct ceph_osds *lhs,
const struct ceph_osds *rhs)
{
if (__osds_equal(lhs, rhs) &&
lhs->primary == rhs->primary)
return true;
return false;
}
static bool osds_valid(const struct ceph_osds *set)
{
/* non-empty set */
if (set->size > 0 && set->primary >= 0)
return true;
/* empty can_shift_osds set */
if (!set->size && set->primary == -1)
return true;
/* empty !can_shift_osds set - all NONE */
if (set->size > 0 && set->primary == -1) {
int i;
for (i = 0; i < set->size; i++) {
if (set->osds[i] != CRUSH_ITEM_NONE)
break;
}
if (i == set->size)
return true;
}
return false;
}
void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
dest->size = src->size;
dest->primary = src->primary;
}
static bool is_split(const struct ceph_pg *pgid,
u32 old_pg_num,
u32 new_pg_num)
{
int old_bits = calc_bits_of(old_pg_num);
int old_mask = (1 << old_bits) - 1;
int n;
WARN_ON(pgid->seed >= old_pg_num);
if (new_pg_num <= old_pg_num)
return false;
for (n = 1; ; n++) {
int next_bit = n << (old_bits - 1);
u32 s = next_bit | pgid->seed;
if (s < old_pg_num || s == pgid->seed)
continue;
if (s >= new_pg_num)
break;
s = ceph_stable_mod(s, old_pg_num, old_mask);
if (s == pgid->seed)
return true;
}
return false;
}
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
const struct ceph_osds *old_up,
const struct ceph_osds *new_up,
int old_size,
int new_size,
int old_min_size,
int new_min_size,
u32 old_pg_num,
u32 new_pg_num,
bool old_sort_bitwise,
bool new_sort_bitwise,
const struct ceph_pg *pgid)
{
return !osds_equal(old_acting, new_acting) ||
!osds_equal(old_up, new_up) ||
old_size != new_size ||
old_min_size != new_min_size ||
is_split(pgid, old_pg_num, new_pg_num) ||
old_sort_bitwise != new_sort_bitwise;
}
static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
int i;
for (i = 0; i < acting->size; i++) {
if (acting->osds[i] == osd)
return i;
}
return -1;
}
static bool primary_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting)
{
if (!old_acting->size && !new_acting->size)
return false; /* both still empty */
if (!old_acting->size ^ !new_acting->size)
return true; /* was empty, now not, or vice versa */
if (old_acting->primary != new_acting->primary)
return true; /* primary changed */
if (calc_pg_rank(old_acting->primary, old_acting) !=
calc_pg_rank(new_acting->primary, new_acting))
return true;
return false; /* same primary (tho replicas may have changed) */
}
bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
bool any_change)
{
if (primary_changed(old_acting, new_acting))
return true;
if (any_change && !__osds_equal(old_acting, new_acting))
return true;
return false;
}
/*
* calculate file layout from given offset, length.
@@ -1455,30 +1758,71 @@ invalid:
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
/*
* Calculate mapping of a (oloc, oid) pair to a PG. Should only be
* called with target's (oloc, oid), since tiering isn't taken into
* account.
* Map an object into a PG.
*
* Should only be called with target_oid and target_oloc (as opposed to
* base_oid and base_oloc), since tiering isn't taken into account.
*/
int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
struct ceph_object_locator *oloc,
struct ceph_object_id *oid,
struct ceph_pg *pg_out)
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
struct ceph_pg *raw_pgid)
{
struct ceph_pg_pool_info *pi;
pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
if (!pi)
return -EIO;
return -ENOENT;
pg_out->pool = oloc->pool;
pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
oid->name_len);
raw_pgid->pool = oloc->pool;
raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
oid->name_len);
dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
pg_out->pool, pg_out->seed);
dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
oid->name, raw_pgid->pool, raw_pgid->seed);
return 0;
}
EXPORT_SYMBOL(ceph_oloc_oid_to_pg);
EXPORT_SYMBOL(ceph_object_locator_to_pg);
/*
* Map a raw PG (full precision ps) into an actual PG.
*/
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid,
struct ceph_pg *pgid)
{
pgid->pool = raw_pgid->pool;
pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
pi->pg_num_mask);
}
/*
* Map a raw PG (full precision ps) into a placement ps (placement
* seed). Include pool id in that value so that different pools don't
* use the same seeds.
*/
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid)
{
if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
/* hash pool id and seed so that pool PGs do not overlap */
return crush_hash32_2(CRUSH_HASH_RJENKINS1,
ceph_stable_mod(raw_pgid->seed,
pi->pgp_num,
pi->pgp_num_mask),
raw_pgid->pool);
} else {
/*
* legacy behavior: add ps and pool together. this is
* not a great approach because the PGs from each pool
* will overlap on top of each other: 0.5 == 1.4 ==
* 2.3 == ...
*/
return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
pi->pgp_num_mask) +
(unsigned)raw_pgid->pool;
}
}
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
int *result, int result_max,
@@ -1497,84 +1841,92 @@ static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
}
/*
* Calculate raw (crush) set for given pgid.
* Calculate raw set (CRUSH output) for given PG. The result may
* contain nonexistent OSDs. ->primary is undefined for a raw set.
*
* Return raw set length, or error.
* Placement seed (CRUSH input) is returned through @ppps.
*/
static int pg_to_raw_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pool,
struct ceph_pg pgid, u32 pps, int *osds)
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid,
struct ceph_osds *raw,
u32 *ppps)
{
u32 pps = raw_pg_to_pps(pi, raw_pgid);
int ruleno;
int len;
/* crush */
ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
pool->type, pool->size);
ceph_osds_init(raw);
if (ppps)
*ppps = pps;
ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
pi->size);
if (ruleno < 0) {
pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
pgid.pool, pool->crush_ruleset, pool->type,
pool->size);
return -ENOENT;
pi->id, pi->crush_ruleset, pi->type, pi->size);
return;
}
len = do_crush(osdmap, ruleno, pps, osds,
min_t(int, pool->size, CEPH_PG_MAX_SIZE),
len = do_crush(osdmap, ruleno, pps, raw->osds,
min_t(int, pi->size, ARRAY_SIZE(raw->osds)),
osdmap->osd_weight, osdmap->max_osd);
if (len < 0) {
pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
len, ruleno, pgid.pool, pool->crush_ruleset,
pool->type, pool->size);
return len;
len, ruleno, pi->id, pi->crush_ruleset, pi->type,
pi->size);
return;
}
return len;
raw->size = len;
}
/*
* Given raw set, calculate up set and up primary.
* Given raw set, calculate up set and up primary. By definition of an
* up set, the result won't contain nonexistent or down OSDs.
*
* Return up set length. *primary is set to up primary osd id, or -1
* if up set is empty.
* This is done in-place - on return @set is the up set. If it's
* empty, ->primary will remain undefined.
*/
static int raw_to_up_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pool,
int *osds, int len, int *primary)
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
struct ceph_osds *set)
{
int up_primary = -1;
int i;
if (ceph_can_shift_osds(pool)) {
/* ->primary is undefined for a raw set */
BUG_ON(set->primary != -1);
if (ceph_can_shift_osds(pi)) {
int removed = 0;
for (i = 0; i < len; i++) {
if (ceph_osd_is_down(osdmap, osds[i])) {
/* shift left */
for (i = 0; i < set->size; i++) {
if (ceph_osd_is_down(osdmap, set->osds[i])) {
removed++;
continue;
}
if (removed)
osds[i - removed] = osds[i];
set->osds[i - removed] = set->osds[i];
}
len -= removed;
if (len > 0)
up_primary = osds[0];
set->size -= removed;
if (set->size > 0)
set->primary = set->osds[0];
} else {
for (i = len - 1; i >= 0; i--) {
if (ceph_osd_is_down(osdmap, osds[i]))
osds[i] = CRUSH_ITEM_NONE;
/* set down/dne devices to NONE */
for (i = set->size - 1; i >= 0; i--) {
if (ceph_osd_is_down(osdmap, set->osds[i]))
set->osds[i] = CRUSH_ITEM_NONE;
else
up_primary = osds[i];
set->primary = set->osds[i];
}
}
*primary = up_primary;
return len;
}
static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
struct ceph_pg_pool_info *pool,
int *osds, int len, int *primary)
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
u32 pps,
struct ceph_osds *up)
{
int i;
int pos = -1;
@@ -1586,8 +1938,8 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
if (!osdmap->osd_primary_affinity)
return;
for (i = 0; i < len; i++) {
int osd = osds[i];
for (i = 0; i < up->size; i++) {
int osd = up->osds[i];
if (osd != CRUSH_ITEM_NONE &&
osdmap->osd_primary_affinity[osd] !=
@@ -1595,7 +1947,7 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
break;
}
}
if (i == len)
if (i == up->size)
return;
/*
@@ -1603,8 +1955,8 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
* osd into the hash/rng so that a proportional fraction of an
* osd's pgs get rejected as primary.
*/
for (i = 0; i < len; i++) {
int osd = osds[i];
for (i = 0; i < up->size; i++) {
int osd = up->osds[i];
u32 aff;
if (osd == CRUSH_ITEM_NONE)
@@ -1629,135 +1981,110 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
if (pos < 0)
return;
*primary = osds[pos];
up->primary = up->osds[pos];
if (ceph_can_shift_osds(pool) && pos > 0) {
if (ceph_can_shift_osds(pi) && pos > 0) {
/* move the new primary to the front */
for (i = pos; i > 0; i--)
osds[i] = osds[i - 1];
osds[0] = *primary;
up->osds[i] = up->osds[i - 1];
up->osds[0] = up->primary;
}
}
/*
* Given up set, apply pg_temp and primary_temp mappings.
* Get pg_temp and primary_temp mappings for given PG.
*
* Return acting set length. *primary is set to acting primary osd id,
* or -1 if acting set is empty.
* Note that a PG may have none, only pg_temp, only primary_temp or
* both pg_temp and primary_temp mappings. This means @temp isn't
* always a valid OSD set on return: in the "only primary_temp" case,
* @temp will have its ->primary >= 0 but ->size == 0.
*/
static int apply_temps(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pool, struct ceph_pg pgid,
int *osds, int len, int *primary)
static void get_temp_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pi,
const struct ceph_pg *raw_pgid,
struct ceph_osds *temp)
{
struct ceph_pg pgid;
struct ceph_pg_mapping *pg;
int temp_len;
int temp_primary;
int i;
/* raw_pg -> pg */
pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
pool->pg_num_mask);
raw_pg_to_pg(pi, raw_pgid, &pgid);
ceph_osds_init(temp);
/* pg_temp? */
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
temp_len = 0;
temp_primary = -1;
for (i = 0; i < pg->pg_temp.len; i++) {
if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
if (ceph_can_shift_osds(pool))
if (ceph_can_shift_osds(pi))
continue;
else
osds[temp_len++] = CRUSH_ITEM_NONE;
temp->osds[temp->size++] = CRUSH_ITEM_NONE;
} else {
osds[temp_len++] = pg->pg_temp.osds[i];
temp->osds[temp->size++] = pg->pg_temp.osds[i];
}
}
/* apply pg_temp's primary */
for (i = 0; i < temp_len; i++) {
if (osds[i] != CRUSH_ITEM_NONE) {
temp_primary = osds[i];
for (i = 0; i < temp->size; i++) {
if (temp->osds[i] != CRUSH_ITEM_NONE) {
temp->primary = temp->osds[i];
break;
}
}
} else {
temp_len = len;
temp_primary = *primary;
}
/* primary_temp? */
pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
if (pg)
temp_primary = pg->primary_temp.osd;
*primary = temp_primary;
return temp_len;
temp->primary = pg->primary_temp.osd;
}
/*
* Calculate acting set for given pgid.
* Map a PG to its acting set as well as its up set.
*
* Return acting set length, or error. *primary is set to acting
* primary osd id, or -1 if acting set is empty or on error.
* Acting set is used for data mapping purposes, while up set can be
* recorded for detecting interval changes and deciding whether to
* resend a request.
*/
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
int *osds, int *primary)
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid,
struct ceph_osds *up,
struct ceph_osds *acting)
{
struct ceph_pg_pool_info *pool;
struct ceph_pg_pool_info *pi;
u32 pps;
int len;
pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
if (!pool) {
*primary = -1;
return -ENOENT;
pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
if (!pi) {
ceph_osds_init(up);
ceph_osds_init(acting);
goto out;
}
if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
/* hash pool id and seed so that pool PGs do not overlap */
pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask),
pgid.pool);
} else {
/*
* legacy behavior: add ps and pool together. this is
* not a great approach because the PGs from each pool
* will overlap on top of each other: 0.5 == 1.4 ==
* 2.3 == ...
*/
pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask) +
(unsigned)pgid.pool;
pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
raw_to_up_osds(osdmap, pi, up);
apply_primary_affinity(osdmap, pi, pps, up);
get_temp_osds(osdmap, pi, raw_pgid, acting);
if (!acting->size) {
memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
acting->size = up->size;
if (acting->primary == -1)
acting->primary = up->primary;
}
len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds);
if (len < 0) {
*primary = -1;
return len;
}
len = raw_to_up_osds(osdmap, pool, osds, len, primary);
apply_primary_affinity(osdmap, pps, pool, osds, len, primary);
len = apply_temps(osdmap, pool, pgid, osds, len, primary);
return len;
out:
WARN_ON(!osds_valid(up) || !osds_valid(acting));
}
/*
* Return primary osd for given pgid, or -1 if none.
* Return acting primary for given PG, or -1 if none.
*/
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid)
{
int osds[CEPH_PG_MAX_SIZE];
int primary;
struct ceph_osds up, acting;
ceph_calc_pg_acting(osdmap, pgid, osds, &primary);
return primary;
ceph_pg_to_up_acting_osds(osdmap, raw_pgid, &up, &acting);
return acting.primary;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
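
Putting the renamed mapping entry points together, a minimal usage sketch of the new object-to-OSD pipeline (error handling trimmed; assumes osdmap, oid and oloc are already set up; the wrapper function itself is hypothetical):

/* Hypothetical helper: map an object to its acting primary. */
static int map_object_to_acting_primary(struct ceph_osdmap *osdmap,
					struct ceph_object_id *oid,
					struct ceph_object_locator *oloc)
{
	struct ceph_pg raw_pgid;
	struct ceph_osds up, acting;
	int ret;

	/* object name + locator -> raw PG (full-precision seed) */
	ret = ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
	if (ret)
		return ret;	/* -ENOENT if the pool doesn't exist */

	/* raw PG -> up and acting sets (CRUSH + temp mappings) */
	ceph_pg_to_up_acting_osds(osdmap, &raw_pgid, &up, &acting);
	return acting.primary;	/* -1 if the acting set is empty */
}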