#include <linux/export.h>
+#define FILE_DATA(_file) (file_inode(_file))
+
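+/*
+ * Usage sketch (the "foo" name is hypothetical, not part of this patch):
+ * given a show routine
+ *
+ *	static int foo_show(struct seq_file *seq, void *v)
+ *	{
+ *		seq_puts(seq, "example\n");
+ *		return 0;
+ *	}
+ *
+ * DEFINE_SIMPLE_DEBUGFS_FILE(foo) expands to foo_open() and
+ * foo_debugfs_fops, ready to pass to debugfs_create_file().
+ */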
+#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, name##_show, inode->i_private); \
+} \
+static const struct file_operations name##_debugfs_fops = { \
+ .owner = THIS_MODULE, \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release \
+}
+
struct t4_debugfs_entry {
const char *name;
const struct file_operations *ops;
- mode_t mode;
+ umode_t mode;
unsigned char data;
};
+struct seq_tab {
+ int (*show)(struct seq_file *seq, void *v, int idx);
+ unsigned int rows; /* # of entries */
+ unsigned char width; /* size in bytes of each entry */
+ unsigned char skip_first; /* whether the first line is a header */
+ char data[]; /* the table data */
+};
+
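+/*
+ * Convert a single hex digit to its numeric value; callers are
+ * expected to have validated the character first (e.g. with isxdigit()).
+ */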
+static inline unsigned int hex2val(char c)
+{
+ return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
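+/*
+ * Allocate a seq_tab with room for @rows entries of @width bytes each
+ * and attach it to @f's seq_file; @show formats one entry per line.
+ * A non-zero @have_header presumably reserves the first output line
+ * for a header row (see seq_tab.skip_first above).
+ */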
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+ unsigned int width, unsigned int have_header,
+ int (*show)(struct seq_file *seq, void *v, int i));
+
int t4_setup_debugfs(struct adapter *adap);
void add_debugfs_files(struct adapter *adap,
struct t4_debugfs_entry *files,
unsigned int nfiles);
+int mem_open(struct inode *inode, struct file *file);
#endif
return rc;
}
- static void ll_dump_inode(struct inode *inode)
- {
- struct ll_d_hlist_node *tmp;
- int dentry_count = 0;
-
- LASSERT(inode != NULL);
-
- ll_d_hlist_for_each(tmp, &inode->i_dentry)
- dentry_count++;
-
- CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
- inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
- inode->i_mode, atomic_read(&inode->i_count), dentry_count);
- }
-
- void lustre_dump_dentry(struct dentry *dentry, int recur)
- {
- struct list_head *tmp;
- int subdirs = 0;
-
- LASSERT(dentry != NULL);
-
- list_for_each(tmp, &dentry->d_subdirs)
- subdirs++;
-
- CERROR("dentry %p dump: name=%pd parent=%pd (%p), inode=%p, count=%u, flags=0x%x, fsdata=%p, %d subdirs\n",
- dentry, dentry, dentry->d_parent, dentry->d_parent,
- dentry->d_inode, d_count(dentry),
- dentry->d_flags, dentry->d_fsdata, subdirs);
- if (dentry->d_inode != NULL)
- ll_dump_inode(dentry->d_inode);
-
- if (recur == 0)
- return;
-
- list_for_each(tmp, &dentry->d_subdirs) {
- struct dentry *d = list_entry(tmp, struct dentry, d_child);
-
- lustre_dump_dentry(d, recur - 1);
- }
- }
-
static void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
if (err)
goto out_free;
lsi->lsi_flags |= LSI_BDI_INITIALIZED;
- lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
+ lsi->lsi_bdi.capabilities = 0;
err = ll_bdi_register(&lsi->lsi_bdi);
if (err)
goto out_free;
/* OIDEBUG(inode); */
- /* initializing backing dev info. */
- inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
-
-
if (S_ISREG(inode->i_mode)) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
+#include <linux/kasan.h>
+
#include "internal.h"
#include "mount.h"
* LRU lists entirely, while shrink_move moves it to the indicated
* private list.
*/
-static void d_lru_isolate(struct dentry *dentry)
+static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
- list_del_init(&dentry->d_lru);
+ list_lru_isolate(lru, &dentry->d_lru);
}
-static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
+static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
+ struct list_head *list)
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags |= DCACHE_SHRINK_LIST;
- list_move_tail(&dentry->d_lru, list);
+ list_lru_isolate_move(lru, &dentry->d_lru, list);
}
/*
* dentry_iput drops the locks, at which point nobody (except
* transient RCU lookups) can reach this dentry.
*/
- BUG_ON((int)dentry->d_lockref.count > 0);
+ BUG_ON(dentry->d_lockref.count > 0);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
struct dentry *parent = dentry->d_parent;
if (IS_ROOT(dentry))
return NULL;
- if (unlikely((int)dentry->d_lockref.count < 0))
+ if (unlikely(dentry->d_lockref.count < 0))
return NULL;
if (likely(spin_trylock(&parent->d_lock)))
return parent;
return parent;
}
+ /*
+ * Try to do a lockless dput(), and return whether that was successful.
+ *
+ * If unsuccessful, we return false, having already taken the dentry lock.
+ *
+ * The caller needs to hold the RCU read lock, so that the dentry is
+ * guaranteed to stay around even if the refcount goes down to zero!
+ */
+ static inline bool fast_dput(struct dentry *dentry)
+ {
+ int ret;
+ unsigned int d_flags;
+
+ /*
+ * If we have a d_op->d_delete() operation, we should not
+ * let the dentry count go to zero, so use "put_or_lock".
+ */
+ if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+ return lockref_put_or_lock(&dentry->d_lockref);
+
+ /*
+ * .. otherwise, we can try to just decrement the
+ * lockref optimistically.
+ */
+ ret = lockref_put_return(&dentry->d_lockref);
+
+ /*
+ * If the lockref_put_return() failed due to the lock being held
+ * by somebody else, the fast path has failed. We will need to
+ * get the lock, and then check the count again.
+ */
+ if (unlikely(ret < 0)) {
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_lockref.count > 1) {
+ dentry->d_lockref.count--;
+ spin_unlock(&dentry->d_lock);
+ return true;
+ }
+ return false;
+ }
+
+ /*
+ * If we weren't the last ref, we're done.
+ */
+ if (ret)
+ return true;
+
+ /*
+ * Careful, careful. The reference count went down
+ * to zero, but we don't hold the dentry lock, so
+ * somebody else could get it again, and do another
+ * dput(), and we need to not race with that.
+ *
+ * However, there is a very special and common case
+ * where we don't care, because there is nothing to
+ * do: the dentry is still hashed, it does not have
+ * a 'delete' op, and it's referenced and already on
+ * the LRU list.
+ *
+ * NOTE! Since we aren't locked, these values are
+ * not "stable". However, it is sufficient that at
+ * some point after we dropped the reference the
+ * dentry was hashed and the flags had the proper
+ * value. Other dentry users may have re-gotten
+ * a reference to the dentry and changed that, but
+ * our work is done - we can leave the dentry
+ * around with a zero refcount.
+ */
+ smp_rmb();
+ d_flags = ACCESS_ONCE(dentry->d_flags);
+ d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+
+ /* Nothing to do? Dropping the reference was all we needed? */
+ if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+ return true;
+
+ /*
+ * Not the fast normal case? Get the lock. We've already decremented
+ * the refcount, but we'll need to re-check the situation after
+ * getting the lock.
+ */
+ spin_lock(&dentry->d_lock);
+
+ /*
+ * Did somebody else grab a reference to it in the meantime, and
+ * we're no longer the last user after all? Alternatively, somebody
+ * else could have killed it and marked it dead. Either way, we
+ * don't need to do anything else.
+ */
+ if (dentry->d_lockref.count) {
+ spin_unlock(&dentry->d_lock);
+ return true;
+ }
+
+ /*
+ * Re-get the reference we optimistically dropped. We hold the
+ * lock, and we just tested that it was zero, so we can just
+ * set it to 1.
+ */
+ dentry->d_lockref.count = 1;
+ return false;
+ }
+
+
/*
* This is dput
*
return;
repeat:
- if (lockref_put_or_lock(&dentry->d_lockref))
+ rcu_read_lock();
+ if (likely(fast_dput(dentry))) {
+ rcu_read_unlock();
return;
+ }
+
+ /* Slow case: now with the dentry lock held */
+ rcu_read_unlock();
/* Unreachable? Get rid of it */
if (unlikely(d_unhashed(dentry)))
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free it.
*/
- if ((int)dentry->d_lockref.count > 0) {
+ if (dentry->d_lockref.count > 0) {
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
}
}
-static enum lru_status
-dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+static enum lru_status dentry_lru_isolate(struct list_head *item,
+ struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
* another pass through the LRU.
*/
if (dentry->d_lockref.count) {
- d_lru_isolate(dentry);
+ d_lru_isolate(lru, dentry);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
}
return LRU_ROTATE;
}
- d_lru_shrink_move(dentry, freeable);
+ d_lru_shrink_move(lru, dentry, freeable);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
/**
* prune_dcache_sb - shrink the dcache
* @sb: superblock
- * @nr_to_scan : number of entries to try to free
- * @nid: which node to scan for freeable entities
+ * @sc: shrink control, passed to list_lru_shrink_walk()
*
- * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
- * done when we need more memory an called from the superblock shrinker
+ * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
+ * is done when we need more memory; it is called from the superblock shrinker
+ * function.
*
* This function may fail to free any resources if all the dentries are in
* use.
*/
-long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
- int nid)
+long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
LIST_HEAD(dispose);
long freed;
- freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
- &dispose, &nr_to_scan);
+ freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
+ dentry_lru_isolate, &dispose);
shrink_dentry_list(&dispose);
return freed;
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
- spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
if (!spin_trylock(&dentry->d_lock))
return LRU_SKIP;
- d_lru_shrink_move(dentry, freeable);
+ d_lru_shrink_move(lru, dentry, freeable);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
}
atomic_set(&p->u.count, 1);
dname = p->name;
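+ /*
+ * Word-at-a-time name comparisons may read past the terminating
+ * NUL up to the next word boundary; unpoison that padding so
+ * KASAN does not flag the access.
+ */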
+ if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
+ kasan_unpoison_shadow(dname,
+ round_up(name->len + 1, sizeof(unsigned long)));
} else {
dname = dentry->d_iname;
}
}
EXPORT_SYMBOL(d_hash_and_lookup);
- /**
- * d_validate - verify dentry provided from insecure source (deprecated)
- * @dentry: The dentry alleged to be valid child of @dparent
- * @dparent: The parent dentry (known to be valid)
- *
- * An insecure source has sent us a dentry, here we verify it and dget() it.
- * This is used by ncpfs in its readdir implementation.
- * Zero is returned in the dentry is invalid.
- *
- * This function is slow for big directories, and deprecated, do not use it.
- */
- int d_validate(struct dentry *dentry, struct dentry *dparent)
- {
- struct dentry *child;
-
- spin_lock(&dparent->d_lock);
- list_for_each_entry(child, &dparent->d_subdirs, d_child) {
- if (dentry == child) {
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- __dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&dparent->d_lock);
- return 1;
- }
- }
- spin_unlock(&dparent->d_lock);
-
- return 0;
- }
- EXPORT_SYMBOL(d_validate);
-
/*
* When a file is deleted, we have two options:
* - turn this dentry into a negative dentry
struct linux_binprm;
struct path;
struct mount;
+struct shrink_control;
/*
* block_dev.c
* inode.c
*/
extern spinlock_t inode_sb_list_lock;
-extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
- int nid);
+extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
extern void inode_add_lru(struct inode *inode);
/*
*/
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
extern int d_set_mounted(struct dentry *dentry);
-extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
- int nid);
+extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
/*
* read_write.c
/*
* fs_pin.c
*/
- extern void sb_pin_kill(struct super_block *sb);
+ extern void group_pin_kill(struct hlist_head *p);
extern void mnt_pin_kill(struct mount *m);
/*
#endif
}
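+
+ /*
+ * fs_pin "kill" callback: drop the stashed ex-mountpoint dentry,
+ * detach the pin and put the final reference on the mount itself.
+ */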
+ static void drop_mountpoint(struct fs_pin *p)
+ {
+ struct mount *m = container_of(p, struct mount, mnt_umount);
+ dput(m->mnt_ex_mountpoint);
+ pin_remove(p);
+ mntput(&m->mnt);
+ }
+
static struct mount *alloc_vfsmnt(const char *name)
{
struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
goto out_free_cache;
if (name) {
- mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
+ mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
if (!mnt->mnt_devname)
goto out_free_id;
}
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
+ init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
}
return mnt;
#ifdef CONFIG_SMP
out_free_devname:
- kfree(mnt->mnt_devname);
+ kfree_const(mnt->mnt_devname);
#endif
out_free_id:
mnt_free_id(mnt);
static void free_vfsmnt(struct mount *mnt)
{
- kfree(mnt->mnt_devname);
+ kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
free_percpu(mnt->mnt_pcp);
#endif
static void namespace_unlock(void)
{
- struct mount *mnt;
struct hlist_head head = unmounted;
if (likely(hlist_empty(&head))) {
head.first->pprev = &head.first;
INIT_HLIST_HEAD(&unmounted);
-
- /* undo decrements we'd done in umount_tree() */
- hlist_for_each_entry(mnt, &head, mnt_hash)
- if (mnt->mnt_ex_mountpoint.mnt)
- mntget(mnt->mnt_ex_mountpoint.mnt);
-
up_write(&namespace_sem);
synchronize_rcu();
- while (!hlist_empty(&head)) {
- mnt = hlist_entry(head.first, struct mount, mnt_hash);
- hlist_del_init(&mnt->mnt_hash);
- if (mnt->mnt_ex_mountpoint.mnt)
- path_put(&mnt->mnt_ex_mountpoint);
- mntput(&mnt->mnt);
- }
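+ /*
+ * Every mount on the list was pinned to &unmounted via its
+ * ->mnt_umount fs_pin in umount_tree(); killing the group runs
+ * drop_mountpoint() on each of them.
+ */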
+ group_pin_kill(&head);
}
static inline void namespace_lock(void)
{
HLIST_HEAD(tmp_list);
struct mount *p;
- struct mount *last = NULL;
for (p = mnt; p; p = next_mnt(p, mnt)) {
hlist_del_init_rcu(&p->mnt_hash);
if (how)
propagate_umount(&tmp_list);
- hlist_for_each_entry(p, &tmp_list, mnt_hash) {
+ while (!hlist_empty(&tmp_list)) {
+ p = hlist_entry(tmp_list.first, struct mount, mnt_hash);
+ hlist_del_init_rcu(&p->mnt_hash);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
p->mnt_ns = NULL;
if (how < 2)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+
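+ /*
+ * Park this mount on the global "unmounted" group, pinned to its
+ * parent; drop_mountpoint() will drop the old mountpoint and the
+ * final mount reference once it is safe to do so.
+ */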
+ pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
if (mnt_has_parent(p)) {
hlist_del_init(&p->mnt_mp_list);
put_mountpoint(p->mnt_mp);
mnt_add_count(p->mnt_parent, -1);
- /* move the reference to mountpoint into ->mnt_ex_mountpoint */
- p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
- p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
+ /* old mountpoint will be dropped when we can do that */
+ p->mnt_ex_mountpoint = p->mnt_mountpoint;
p->mnt_mountpoint = p->mnt.mnt_root;
p->mnt_parent = p;
p->mnt_mp = NULL;
}
change_mnt_propagation(p, MS_PRIVATE);
- last = p;
- }
- if (last) {
- last->mnt_hash.next = unmounted.first;
- if (unmounted.first)
- unmounted.first->pprev = &last->mnt_hash.next;
- unmounted.first = tmp_list.first;
- unmounted.first->pprev = &unmounted.first;
}
}
struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
- struct proc_dir_entry *de = PROC_I(inode)->pde;
+ struct proc_dir_entry *de = PDE(inode);
if (de && de->nlink)
set_nlink(inode, de->nlink);
if (ret)
return ret;
- if (S_ISDIR(dp->mode)) {
- dp->proc_fops = &proc_dir_operations;
- dp->proc_iops = &proc_dir_inode_operations;
- dir->nlink++;
- } else if (S_ISLNK(dp->mode)) {
- dp->proc_iops = &proc_link_inode_operations;
- } else if (S_ISREG(dp->mode)) {
- BUG_ON(dp->proc_fops == NULL);
- dp->proc_iops = &proc_file_inode_operations;
- } else {
- WARN_ON(1);
- proc_free_inum(dp->low_ino);
- return -EINVAL;
- }
-
spin_lock(&proc_subdir_lock);
dp->parent = dir;
if (pde_subdir_insert(dir, dp) == false) {
WARN(1, "proc_dir_entry '%s/%s' already registered\n",
dir->name, dp->name);
spin_unlock(&proc_subdir_lock);
- if (S_ISDIR(dp->mode))
- dir->nlink--;
proc_free_inum(dp->low_ino);
return -EEXIST;
}
ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
if (ent->data) {
strcpy((char*)ent->data,dest);
+ ent->proc_iops = &proc_link_inode_operations;
if (proc_register(parent, ent) < 0) {
kfree(ent->data);
kfree(ent);
ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
if (ent) {
ent->data = data;
+ ent->proc_fops = &proc_dir_operations;
+ ent->proc_iops = &proc_dir_inode_operations;
+ parent->nlink++;
if (proc_register(parent, ent) < 0) {
kfree(ent);
+ parent->nlink--;
ent = NULL;
}
}
return NULL;
}
+ BUG_ON(proc_fops == NULL);
+
if ((mode & S_IALLUGO) == 0)
mode |= S_IRUGO;
pde = __proc_create(&parent, name, mode, 1);
goto out;
pde->proc_fops = proc_fops;
pde->data = data;
+ pde->proc_iops = &proc_file_inode_operations;
if (proc_register(parent, pde) < 0)
goto out_free;
return pde;
#include "internal.h"
-LIST_HEAD(super_blocks);
-DEFINE_SPINLOCK(sb_lock);
+static LIST_HEAD(super_blocks);
+static DEFINE_SPINLOCK(sb_lock);
static char *sb_writers_name[SB_FREEZE_LEVELS] = {
"sb_writers",
return SHRINK_STOP;
if (sb->s_op->nr_cached_objects)
- fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);
+ fs_objects = sb->s_op->nr_cached_objects(sb, sc);
- inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
- dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
+ inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
+ dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
total_objects = dentries + inodes + fs_objects + 1;
if (!total_objects)
total_objects = 1;
/* proportion the scan between the caches */
dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
+ fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
/*
* prune the dcache first as the icache is pinned by it, then
* prune the icache, followed by the filesystem specific caches
+ *
+ * Ensure that we always scan at least one object - memcg kmem
+ * accounting uses this to fully empty the caches.
*/
- freed = prune_dcache_sb(sb, dentries, sc->nid);
- freed += prune_icache_sb(sb, inodes, sc->nid);
+ sc->nr_to_scan = dentries + 1;
+ freed = prune_dcache_sb(sb, sc);
+ sc->nr_to_scan = inodes + 1;
+ freed += prune_icache_sb(sb, sc);
if (fs_objects) {
- fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
- total_objects);
- freed += sb->s_op->free_cached_objects(sb, fs_objects,
- sc->nid);
+ sc->nr_to_scan = fs_objects + 1;
+ freed += sb->s_op->free_cached_objects(sb, sc);
}
drop_super(sb);
* scalability bottleneck. The counts could get updated
* between super_cache_count and super_cache_scan anyway.
* Call to super_cache_count with shrinker_rwsem held
- * ensures the safety of call to list_lru_count_node() and
+ * ensures the safety of call to list_lru_shrink_count() and
* s_op->nr_cached_objects().
*/
if (sb->s_op && sb->s_op->nr_cached_objects)
- total_objects = sb->s_op->nr_cached_objects(sb,
- sc->nid);
+ total_objects = sb->s_op->nr_cached_objects(sb, sc);
- total_objects += list_lru_count_node(&sb->s_dentry_lru,
- sc->nid);
- total_objects += list_lru_count_node(&sb->s_inode_lru,
- sc->nid);
+ total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
+ total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
total_objects = vfs_pressure_ratio(total_objects);
return total_objects;
}
init_waitqueue_head(&s->s_writers.wait);
init_waitqueue_head(&s->s_writers.wait_unfrozen);
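+ /*
+ * Default to the no-op bdi; filesystems with real backing storage
+ * are expected to install their own s_bdi during mount.
+ */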
+ s->s_bdi = &noop_backing_dev_info;
s->s_flags = flags;
- s->s_bdi = &default_backing_dev_info;
INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
- if (list_lru_init(&s->s_dentry_lru))
+ if (list_lru_init_memcg(&s->s_dentry_lru))
goto fail;
- if (list_lru_init(&s->s_inode_lru))
+ if (list_lru_init_memcg(&s->s_inode_lru))
goto fail;
init_rwsem(&s->s_umount);
s->s_shrink.scan_objects = super_cache_scan;
s->s_shrink.count_objects = super_cache_count;
s->s_shrink.batch = 1024;
- s->s_shrink.flags = SHRINKER_NUMA_AWARE;
+ s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
return s;
fail:
unregister_shrinker(&s->s_shrink);
fs->kill_sb(s);
+ /*
+ * Since list_lru_destroy() may sleep, we cannot call it from
+ * put_super(), where we hold the sb_lock. Therefore we destroy
+ * the lru lists right now.
+ */
+ list_lru_destroy(&s->s_dentry_lru);
+ list_lru_destroy(&s->s_inode_lru);
+
put_filesystem(fs);
put_super(s);
} else {
remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
if (remount_ro) {
- if (sb->s_pins.first) {
+ if (!hlist_empty(&sb->s_pins)) {
up_write(&sb->s_umount);
- sb_pin_kill(sb);
+ group_pin_kill(&sb->s_pins);
down_write(&sb->s_umount);
if (!sb->s_root)
return 0;
int set_anon_super(struct super_block *s, void *data)
{
- int error = get_anon_bdev(&s->s_dev);
- if (!error)
- s->s_bdi = &noop_backing_dev_info;
- return error;
+ return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);
sb = root->d_sb;
BUG_ON(!sb);
WARN_ON(!sb->s_bdi);
- WARN_ON(sb->s_bdi == &default_backing_dev_info);
sb->s_flags |= MS_BORN;
error = security_sb_kern_mount(sb, flags, secdata);