Merge branch 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax

Pull IDA updates from Matthew Wilcox:
 "A better IDA API:

      id = ida_alloc(ida, GFP_xxx);
      ida_free(ida, id);

  rather than the cumbersome ida_simple_get(), ida_simple_remove().

  The new IDA API is similar to ida_simple_get() but better named.  The
  internal restructuring of the IDA code removes the bitmap
  preallocation nonsense.

  I hope the net -200 lines of code is convincing"
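
As a rough illustration of the conversion (a sketch, not code from this pull
request: the example_* names are invented, only the ida_* calls are the real
<linux/idr.h> API), an allocate/free pair changes like this:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(void)
	{
		/* Old: ida_simple_get(&example_ida, 0, 0, GFP_KERNEL) --
		 * a [start, end) range had to be passed even when the
		 * caller did not care (end == 0 meant "no upper limit").
		 * New: just ask for the lowest free id. */
		return ida_alloc(&example_ida, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		/* Old: ida_simple_remove(&example_ida, id).  New: */
		ida_free(&example_ida, id);
	}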

* 'ida-4.19' of git://git.infradead.org/users/willy/linux-dax: (29 commits)
  ida: Change ida_get_new_above to return the id
  ida: Remove old API
  test_ida: check_ida_destroy and check_ida_alloc
  test_ida: Convert check_ida_conv to new API
  test_ida: Move ida_check_max
  test_ida: Move ida_check_leaf
  idr-test: Convert ida_check_nomem to new API
  ida: Start new test_ida module
  target/iscsi: Allocate session IDs from an IDA
  iscsi target: fix session creation failure handling
  drm/vmwgfx: Convert to new IDA API
  dmaengine: Convert to new IDA API
  ppc: Convert vas ID allocation to new IDA API
  media: Convert entity ID allocation to new IDA API
  ppc: Convert mmu context allocation to new IDA API
  Convert net_namespace to new IDA API
  cb710: Convert to new IDA API
  rsxx: Convert to new IDA API
  osd: Convert to new IDA API
  sd: Convert to new IDA API
  ...
Linus Torvalds committed 2018-08-26 11:48:42 -07:00
28 files changed, 486 insertions(+), 680 deletions(-)

View File

@@ -46,7 +46,7 @@ static int pty_limit = NR_UNIX98_PTY_DEFAULT;
 static int pty_reserve = NR_UNIX98_PTY_RESERVE;
 static int pty_limit_min;
 static int pty_limit_max = INT_MAX;
-static int pty_count;
+static atomic_t pty_count = ATOMIC_INIT(0);
 static struct ctl_table pty_table[] = {
 	{
@@ -93,8 +93,6 @@ static struct ctl_table pty_root_table[] = {
 	{}
 };
-static DEFINE_MUTEX(allocated_ptys_lock);
 struct pts_mount_opts {
 	int setuid;
 	int setgid;
@@ -533,44 +531,25 @@ static struct file_system_type devpts_fs_type = {
 int devpts_new_index(struct pts_fs_info *fsi)
 {
-	int index;
-	int ida_ret;
+	int index = -ENOSPC;
-retry:
-	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
-		return -ENOMEM;
+	if (atomic_inc_return(&pty_count) >= (pty_limit -
+			(fsi->mount_opts.reserve ? 0 : pty_reserve)))
+		goto out;
-	mutex_lock(&allocated_ptys_lock);
-	if (pty_count >= (pty_limit -
-			(fsi->mount_opts.reserve ? 0 : pty_reserve))) {
-		mutex_unlock(&allocated_ptys_lock);
-		return -ENOSPC;
-	}
+	index = ida_alloc_max(&fsi->allocated_ptys, fsi->mount_opts.max - 1,
+			GFP_KERNEL);
-	ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
-	if (ida_ret < 0) {
-		mutex_unlock(&allocated_ptys_lock);
-		if (ida_ret == -EAGAIN)
-			goto retry;
-		return -EIO;
-	}
-	if (index >= fsi->mount_opts.max) {
-		ida_remove(&fsi->allocated_ptys, index);
-		mutex_unlock(&allocated_ptys_lock);
-		return -ENOSPC;
-	}
-	pty_count++;
-	mutex_unlock(&allocated_ptys_lock);
+out:
+	if (index < 0)
+		atomic_dec(&pty_count);
 	return index;
 }
 void devpts_kill_index(struct pts_fs_info *fsi, int idx)
 {
-	mutex_lock(&allocated_ptys_lock);
-	ida_remove(&fsi->allocated_ptys, idx);
-	pty_count--;
-	mutex_unlock(&allocated_ptys_lock);
+	ida_free(&fsi->allocated_ptys, idx);
+	atomic_dec(&pty_count);
 }
 /**
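
The devpts conversion above works because ida_alloc_max() does its own
locking and range checking, so the global allocated_ptys_lock can go away and
pty_count can become a bare atomic counter. A minimal sketch of that
bounded-allocation pattern (the slot_* names are invented for illustration;
only the ida_* and atomic_* calls are real kernel API):

	#include <linux/idr.h>
	#include <linux/atomic.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static DEFINE_IDA(slot_ida);
	static atomic_t slot_count = ATOMIC_INIT(0);

	/* Allocate a slot index below @max while keeping a running count. */
	static int slot_new_index(unsigned int max)
	{
		int index = -ENOSPC;

		/* atomic_inc_return() replaces "lock; count++; unlock". */
		if (atomic_inc_return(&slot_count) > max)
			goto out;

		/* Lowest free id in [0, max - 1] or a negative errno;
		 * no ida_pre_get()/retry loop, no external lock. */
		index = ida_alloc_max(&slot_ida, max - 1, GFP_KERNEL);
	out:
		if (index < 0)
			atomic_dec(&slot_count);
		return index;
	}

	static void slot_kill_index(int index)
	{
		ida_free(&slot_ida, index);
		atomic_dec(&slot_count);
	}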

View File

@@ -61,9 +61,6 @@ __setup("mphash_entries=", set_mphash_entries);
 static u64 event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
-static DEFINE_SPINLOCK(mnt_id_lock);
-static int mnt_id_start = 0;
-static int mnt_group_start = 1;
 static struct hlist_head *mount_hashtable __read_mostly;
 static struct hlist_head *mountpoint_hashtable __read_mostly;
@@ -101,50 +98,30 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
 static int mnt_alloc_id(struct mount *mnt)
 {
-	int res;
+	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
-retry:
-	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
-	spin_lock(&mnt_id_lock);
-	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
-	if (!res)
-		mnt_id_start = mnt->mnt_id + 1;
-	spin_unlock(&mnt_id_lock);
-	if (res == -EAGAIN)
-		goto retry;
-	return res;
+	if (res < 0)
+		return res;
+	mnt->mnt_id = res;
+	return 0;
 }
 static void mnt_free_id(struct mount *mnt)
 {
-	int id = mnt->mnt_id;
-	spin_lock(&mnt_id_lock);
-	ida_remove(&mnt_id_ida, id);
-	if (mnt_id_start > id)
-		mnt_id_start = id;
-	spin_unlock(&mnt_id_lock);
+	ida_free(&mnt_id_ida, mnt->mnt_id);
 }
 /*
  * Allocate a new peer group ID
  *
  * mnt_group_ida is protected by namespace_sem
  */
 static int mnt_alloc_group_id(struct mount *mnt)
 {
-	int res;
+	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
-	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
-		return -ENOMEM;
-	res = ida_get_new_above(&mnt_group_ida,
-				mnt_group_start,
-				&mnt->mnt_group_id);
-	if (!res)
-		mnt_group_start = mnt->mnt_group_id + 1;
-	return res;
+	if (res < 0)
+		return res;
+	mnt->mnt_group_id = res;
+	return 0;
 }
 /*
@@ -152,10 +129,7 @@ static int mnt_alloc_group_id(struct mount *mnt)
  */
 void mnt_release_group_id(struct mount *mnt)
 {
-	int id = mnt->mnt_group_id;
-	ida_remove(&mnt_group_ida, id);
-	if (mnt_group_start > id)
-		mnt_group_start = id;
+	ida_free(&mnt_group_ida, mnt->mnt_group_id);
 	mnt->mnt_group_id = 0;
 }
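
In fs/namespace.c the win is that ida_alloc() and ida_alloc_min() hand back
the allocated id as the return value (or a negative errno) and always pick the
lowest free id, so the mnt_id_start/mnt_group_start hints, the output
parameter of ida_get_new_above() and mnt_id_lock all disappear. A compact
sketch of the resulting pattern (the obj_* names are invented; only the
ida_* calls are real API):

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDA(obj_id_ida);
	static DEFINE_IDA(obj_group_ida);

	/* The id comes back as the return value, so there is no output
	 * parameter and no retry-on-EAGAIN loop. */
	static int obj_alloc_ids(int *id, int *group_id)
	{
		int res = ida_alloc(&obj_id_ida, GFP_KERNEL);	/* lowest free id >= 0 */

		if (res < 0)
			return res;
		*id = res;

		res = ida_alloc_min(&obj_group_ida, 1, GFP_KERNEL); /* lowest free id >= 1 */
		if (res < 0) {
			ida_free(&obj_id_ida, *id);
			return res;
		}
		*group_id = res;
		return 0;
	}

	static void obj_free_ids(int id, int group_id)
	{
		ida_free(&obj_id_ida, id);
		ida_free(&obj_group_ida, group_id);
	}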

View File

@@ -981,58 +981,42 @@ void emergency_thaw_all(void)
 	}
 }
-/*
- * Unnamed block devices are dummy devices used by virtual
- * filesystems which don't use real block-devices. -- jrs
- */
 static DEFINE_IDA(unnamed_dev_ida);
-static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
-/* Many userspace utilities consider an FSID of 0 invalid.
- * Always return at least 1 from get_anon_bdev.
- */
-static int unnamed_dev_start = 1;
+/**
+ * get_anon_bdev - Allocate a block device for filesystems which don't have one.
+ * @p: Pointer to a dev_t.
+ *
+ * Filesystems which don't use real block devices can call this function
+ * to allocate a virtual block device.
+ *
+ * Context: Any context. Frequently called while holding sb_lock.
+ * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
+ * or -ENOMEM if memory allocation failed.
+ */
 int get_anon_bdev(dev_t *p)
 {
 	int dev;
-	int error;
-retry:
-	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
-		return -ENOMEM;
-	spin_lock(&unnamed_dev_lock);
-	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
-	if (!error)
-		unnamed_dev_start = dev + 1;
-	spin_unlock(&unnamed_dev_lock);
-	if (error == -EAGAIN)
-		/* We raced and lost with another CPU. */
-		goto retry;
-	else if (error)
-		return -EAGAIN;
+	/*
+	 * Many userspace utilities consider an FSID of 0 invalid.
+	 * Always return at least 1 from get_anon_bdev.
+	 */
+	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
+			GFP_ATOMIC);
+	if (dev == -ENOSPC)
+		dev = -EMFILE;
+	if (dev < 0)
+		return dev;
-	if (dev >= (1 << MINORBITS)) {
-		spin_lock(&unnamed_dev_lock);
-		ida_remove(&unnamed_dev_ida, dev);
-		if (unnamed_dev_start > dev)
-			unnamed_dev_start = dev;
-		spin_unlock(&unnamed_dev_lock);
-		return -EMFILE;
-	}
-	*p = MKDEV(0, dev & MINORMASK);
+	*p = MKDEV(0, dev);
 	return 0;
 }
 EXPORT_SYMBOL(get_anon_bdev);
 void free_anon_bdev(dev_t dev)
 {
-	int slot = MINOR(dev);
-	spin_lock(&unnamed_dev_lock);
-	ida_remove(&unnamed_dev_ida, slot);
-	if (slot < unnamed_dev_start)
-		unnamed_dev_start = slot;
-	spin_unlock(&unnamed_dev_lock);
+	ida_free(&unnamed_dev_ida, MINOR(dev));
 }
 EXPORT_SYMBOL(free_anon_bdev);
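
get_anon_bdev() now simply asks the IDA for an id in the range
[1, (1 << MINORBITS) - 1], so both the "an FSID of 0 is invalid" rule and the
old post-allocation MINORBITS overflow check are enforced by the allocator
itself; only the traditional -EMFILE error code still has to be patched up by
hand. A sketch of that pattern in isolation (the example_* names are
invented; ida_*, MKDEV(), MINOR() and MINORBITS are the real kernel API):

	#include <linux/idr.h>
	#include <linux/kdev_t.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static DEFINE_IDA(minor_ida);

	/* Hand out an anonymous minor number in [1, (1 << MINORBITS) - 1]. */
	static int example_get_minor(dev_t *p)
	{
		int dev = ida_alloc_range(&minor_ida, 1, (1 << MINORBITS) - 1,
					  GFP_ATOMIC);

		if (dev == -ENOSPC)	/* keep the historical error code */
			dev = -EMFILE;
		if (dev < 0)
			return dev;
		*p = MKDEV(0, dev);
		return 0;
	}

	static void example_put_minor(dev_t dev)
	{
		ida_free(&minor_ida, MINOR(dev));
	}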
@@ -1040,7 +1024,6 @@ int set_anon_super(struct super_block *s, void *data)
 {
 	return get_anon_bdev(&s->s_dev);
 }
-
 EXPORT_SYMBOL(set_anon_super);
 void kill_anon_super(struct super_block *sb)
@@ -1049,7 +1032,6 @@ void kill_anon_super(struct super_block *sb)
 	generic_shutdown_super(sb);
 	free_anon_bdev(dev);
 }
-
 EXPORT_SYMBOL(kill_anon_super);
 void kill_litter_super(struct super_block *sb)
@@ -1058,7 +1040,6 @@ void kill_litter_super(struct super_block *sb)
 		d_genocide(sb->s_root);
 	kill_anon_super(sb);
 }
-
 EXPORT_SYMBOL(kill_litter_super);
 static int ns_test_super(struct super_block *sb, void *data)