Linux filesystem internals: the superblock
linux-a6me:~ # lsblk
NAME   MAJ:MIN RM   SIZE RO MOUNTPOINT
sdc      8:32   0 298.1G  0
sdb      8:16   0 298.1G  0
|-sdb1   8:17   0     2G  0 [SWAP]
`-sdb2   8:18   0 296.1G  0 /
sda      8:0    0 298.1G  0
|-sda1   8:1    0     1G  0 /root/test
|-sda2   8:2    0 297.1G  0
`-sda3   8:3    0   344K  0
linux-a6me:~ # mkdir /root/caq_home
linux-a6me:~ # mount -t ext4 /dev/sda2 /root/caq_home/
mount: warning: /root/caq_home/ seems to be mounted read-only.
linux-a6me:~ # cd /root/caq_home/
linux-a6me:~/caq_home # ls
lost+found
You read that right: all of the data has turned into lost+found. 297.1G of data, and even though all that is left is a lost+found directory, the filesystem state is still: not clean with errors.
dumpe2fs -h /dev/sda2
dumpe2fs 1.41.9 (22-Aug-2009)
Filesystem volume name:   <none>
Last mounted on:          <not available>
Filesystem UUID:          20a05fdb-4060-4f87-877d-4af17e31b76b
Filesystem magic number:  0xEF53
Filesystem revision #:    1 (dynamic)
Filesystem features:      ext_attr resize_inode dir_index filetype extent sparse_super large_file
Filesystem flags:         signed_directory_hash
Default mount options:    (none)
Filesystem state:         not clean with errors
To reason about cases like this, let's start from the kernel's in-memory descriptor of a mounted filesystem, struct super_block:

crash> super_block
struct super_block {
    struct list_head s_list;
    dev_t s_dev;-------------------------------the associated device
    unsigned char s_blocksize_bits;------------block size, expressed as a power of 2
    unsigned long s_blocksize;-----------------block size
    loff_t s_maxbytes;
    struct file_system_type *s_type;-----------filesystem type, e.g. xfs, ext4, sockfs, etc.
    const struct super_operations *s_op;-------the most familiar member: the operation callbacks are wrapped in here
    const struct dquot_operations *dq_op;
    const struct quotactl_ops *s_qcop;
    const struct export_operations *s_export_op;
    unsigned long s_flags;---------------------mount flags
    unsigned long s_magic;
    struct dentry *s_root;---------------------dentry object of the filesystem's root directory
    struct rw_semaphore s_umount;
    int s_count;
    atomic_t s_active;
    void *s_security;
    const struct xattr_handler **s_xattr;
    struct list_head s_inodes;-----------------doubly linked list of the i_sb_list members of all inodes belonging to this super_block
    struct hlist_bl_head s_anon;
    struct list_head *s_files_deprecated;
    struct list_head s_mounts;
    struct list_head s_dentry_lru;
    int s_nr_dentry_unused;
    spinlock_t s_inode_lru_lock;
    struct list_head s_inode_lru;
    int s_nr_inodes_unused;
    struct block_device *s_bdev;---------------the block_device this super_block corresponds to; usable as a key to tell apart instances of the same fs_type
    struct backing_dev_info *s_bdi;
    struct mtd_info *s_mtd;
    struct hlist_node s_instances;-------------hooks this superblock onto the hlist of same-type superblocks
    struct quota_info s_dquot;
    struct sb_writers s_writers;
    char s_id[32];-----------------------------device id, e.g. "nvme1n1"
    u8 s_uuid[16];
    void *s_fs_info;---------------------------worth special attention: points to private data, interpreted differently per fs_type, e.g. ext4 points it at ext4_sb_info, xfs at xfs_mount
    unsigned int s_max_links;
    fmode_t s_mode;
    u32 s_time_gran;
    struct mutex s_vfs_rename_mutex;
    char *s_subtype;
    char *s_options;
    const struct dentry_operations *s_d_op;
    int cleancache_poolid;
    struct shrinker s_shrink;
    atomic_long_t s_remove_count;
    int s_readonly_remount;
    struct workqueue_struct *s_dio_done_wq;
    struct callback_head rcu;
    struct hlist_head s_pins;
}
SIZE: 1088
One filesystem type, say xfs, may have multiple superblock structures in memory: with two disks both formatted as xfs, there are two superblocks once both are mounted.
When sget() (in fs/super.c) allocates a new superblock during mount, it hooks it onto two lists:

list_add_tail(&s->s_list, &super_blocks);          // every superblock is linked, via its s_list member, onto the tail of the global super_blocks list
hlist_add_head(&s->s_instances, &type->fs_supers); // superblocks of the same type are strung, via their s_instances member, onto the fs_supers hlist of their file_system_type
...
}
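For context, here is a trimmed sketch of sget() from fs/super.c (3.10-era; locking details and error paths are mostly elided). It first searches the type's fs_supers hlist for an existing matching superblock via the test callback, and only allocates a fresh one when nothing matches:

struct super_block *sget(struct file_system_type *type,
			 int (*test)(struct super_block *, void *),
			 int (*set)(struct super_block *, void *),
			 int flags, void *data)
{
	struct super_block *s;
	struct super_block *old;

	spin_lock(&sb_lock);
	if (test) {
		/* reuse an existing instance of this type if test() matches */
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			/* ... grab_super(old) and return the existing sb ... */
		}
	}
	s = alloc_super(type, flags);       /* no match: allocate a fresh one */
	set(s, data);                       /* fs-specific init, e.g. record the bdev */
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);          /* global list */
	hlist_add_head(&s->s_instances, &type->fs_supers); /* per-type hlist */
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}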
Let's take a look at this super_blocks doubly linked list:
crash> list -H super_blocks
ffff880157908800
ffff880157909000
ffff880157909800
ffff88015790c800
ffff88013d5d8800
ffff8828b5290800
...
crash> super_block.s_type ffff880157908800
s_type = 0xffffffff81aa4100 <sysfs_fs_type>
We can see that the first super_block's fs_type is generally sysfs_fs_type; this type should be the one registered earliest. Whoever registers earlier sits earlier in the list, unless it gets unregistered and registered again, because the kernel manages registered filesystems through the global variable file_systems (see the register_filesystem sketch after the listing below). In contrast, within one type's hlist of superblocks, the later an instance is added, the closer to the head of the hlist it sits.
p file_systems
file_systems = $2 = (struct file_system_type *) 0xffffffff81aa4100 <sysfs_fs_type>
crash> list file_system_type.next -s file_system_type.name,fs_flags 0xffffffff81aa4100
ffffffff81aa4100
name = 0xffffffff8194bffc "sysfs"
fs_flags = 8
ffffffff81a12440
name = 0xffffffff8192eaba "rootfs"
fs_flags = 0
ffffffff81aa4ee0
name = 0xffffffff8193196c "ramfs"
fs_flags = 8
ffffffff81a9ede0
name = 0xffffffff8192ef3f "bdev"
fs_flags = 0
ffffffff81aa3ba0
name = 0xffffffff81921ccf "proc"
fs_flags = 8
...
ffffffffc012c920
name = 0xffffffffc0122933 "ext3"
fs_flags = 513
ffffffffc012c960
name = 0xffffffffc0122938 "ext2"
fs_flags = 513
ffffffffc012c020
name = 0xffffffffc01221e4 "ext4"
fs_flags = 1537
...
ffffffffc08b2000
name = 0xffffffffc0897006 "xfs"-------------the xfs our workload actually uses
fs_flags = 3841
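Why does registration order match list order? register_filesystem() (trimmed here from fs/filesystems.c) appends the new type at the tail of the file_systems singly linked list:

static struct file_system_type **find_filesystem(const char *name, unsigned len)
{
	struct file_system_type **p;

	for (p = &file_systems; *p; p = &(*p)->next)
		if (strlen((*p)->name) == len &&
		    strncmp((*p)->name, name, len) == 0)
			break;
	return p;   /* points at the match, or at the tail's next pointer */
}

int register_filesystem(struct file_system_type *fs)
{
	int res = 0;
	struct file_system_type **p;

	write_lock(&file_systems_lock);
	p = find_filesystem(fs->name, strlen(fs->name));
	if (*p)
		res = -EBUSY;   /* already registered */
	else
		*p = fs;        /* appended at the tail: earlier registration = earlier in the list */
	write_unlock(&file_systems_lock);
	return res;
}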
Superblocks of the same fs_type are strung, via their s_instances member, onto the fs_supers hlist of the corresponding type. For example, how many sysfs instances do we see:
crash> file_system_type.fs_supers 0xffffffff81aa4100
fs_supers = {
first = 0xffff880135c90938
}
crash> list 0xffff880135c90938 -l super_block.s_instances -s super_block.s_type
ffff880135c90938
s_type = 0xffffffff81aa4100 <sysfs_fs_type>
ffff88017fdc8938
s_type = 0xffffffff81aa4100 <sysfs_fs_type>
As you can see, there are two sysfs instances.
Now let's look at the xfs instances we actually use:
[root@localhost code]# df -hT |grep xfs
/dev/nvme2n1   xfs   3.5T  3.1T  459G  88%  /mnt/S481NY0K400047
/dev/nvme1n1   xfs   3.5T  3.1T  461G  88%  /mnt/S481NY0K400054
/dev/nvme0n1   xfs   3.5T  3.1T  454G  88%  /mnt/S481NY0K400067
/dev/nvme3n1   xfs   3.5T  3.1T  445G  88%  /mnt/S481NY0K400044
/dev/sda       xfs   1.9T   33M  1.9T   1%  /mnt/P6KUWGKV
And the corresponding view from inside the kernel:
crash> file_system_type ffffffffc08b2000
struct file_system_type {
  name = 0xffffffffc0897006 "xfs",
  fs_flags = 3841,
  mount = 0xffffffffc08672c0,
  kill_sb = 0xffffffff8120b990 <kill_block_super>,
  owner = 0xffffffffc08bd280,
  next = 0x0,
  fs_supers = {
    first = 0xffff881dfa68e138
  },
  s_lock_key = {<No data fields>},
  s_umount_key = {<No data fields>},
  s_vfs_rename_key = {<No data fields>},
  s_writers_key = 0xffffffffc08b2038,
  i_lock_key = {<No data fields>},
  i_mutex_key = {<No data fields>},
  i_mutex_dir_key = {<No data fields>}
}
crash> list 0xffff881dfa68e138 -l super_block.s_instances -s super_block.s_type
ffff881dfa68e138
s_type = 0xffffffffc08b2000
ffff88013cf76138
s_type = 0xffffffffc08b2000
ffff88220430c938
s_type = 0xffffffffc08b2000
ffff884973a25138
s_type = 0xffffffffc08b2000
ffff8827caa56138
s_type = 0xffffffffc08b2000
Notice that the xfs superblocks' s_type is not resolved to the xfs symbol name. Let's load the xfs module's debug symbols into crash:
crash> mod |grep xfs
ffffffffc08bd280  xfs  978100  (not loaded)  [CONFIG_KALLSYMS]
crash> mod -s xfs
     MODULE       NAME   SIZE   OBJECT FILE
ffffffffc08bd280  xfs   978100  /usr/lib/debug/usr/lib/modules/3.10.0-693.21.1.el7.x86_64/kernel/fs/xfs/xfs.ko.debug
crash> list 0xffff881dfa68e138 -l super_block.s_instances -s super_block.s_type,s_bdev,s_id
ffff881dfa68e138
s_type = 0xffffffffc08b2000 <xfs_fs_type>
s_bdev = 0xffff8857b5688340
s_id = "sda\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
ffff88013cf76138
s_type = 0xffffffffc08b2000 <xfs_fs_type>
s_bdev = 0xffff8857b5690000
s_id = "nvme3n1\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
ffff88220430c938
s_type = 0xffffffffc08b2000 <xfs_fs_type>
s_bdev = 0xffff8827d3068000
s_id = "nvme0n1\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
ffff884973a25138
s_type = 0xffffffffc08b2000 <xfs_fs_type>
s_bdev = 0xffff8857b5688000
s_id = "nvme1n1\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
ffff8827caa56138
s_type = 0xffffffffc08b2000 <xfs_fs_type>
s_bdev = 0xffff8857b5698000
s_id = "nvme2n1\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
This time s_type resolves to xfs_fs_type. There are exactly five instances, matching df -hT, and the s_id values match the device names as well.
There are quite a few typical fs types; note that even sockets belong to a filesystem (sockfs). Also, if we register a filesystem of our own, it shows up on this list as well:
static struct file_system_type sock_fs_type = {
	.name    = "sockfs",
	.mount   = sockfs_mount,
	.kill_sb = kill_anon_super,
};

static struct file_system_type xfs_fs_type = {
	.owner    = THIS_MODULE,
	.name     = "xfs",
	.mount    = xfs_fs_mount,
	.kill_sb  = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV | FS_HAS_RM_XQUOTA | FS_HAS_INVALIDATE_RANGE |
		    FS_HAS_DIO_IODONE2 | FS_HAS_NEXTDQBLK,
};
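As a quick illustration of registering our own filesystem, here is a minimal sketch (the names myfs, MYFS_MAGIC, etc. are invented for this example). Once the module is loaded, "myfs" is appended to the tail of file_systems and appears in /proc/filesystems:

#include <linux/module.h>
#include <linux/fs.h>

#define MYFS_MAGIC 0x6d796673   /* "myfs", an arbitrary magic for the sketch */

static struct tree_descr myfs_files[] = { {""} };   /* root directory only */

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* libfs helper: sets s_op, s_root, etc. for a simple in-memory fs */
	return simple_fill_super(sb, MYFS_MAGIC, myfs_files);
}

static struct dentry *myfs_mount(struct file_system_type *type, int flags,
				 const char *dev_name, void *data)
{
	return mount_nodev(type, flags, data, myfs_fill_super);
}

static struct file_system_type myfs_type = {
	.owner   = THIS_MODULE,
	.name    = "myfs",
	.mount   = myfs_mount,
	.kill_sb = kill_litter_super,
};

static int __init myfs_init(void)
{
	return register_filesystem(&myfs_type);   /* appended at the tail of file_systems */
}

static void __exit myfs_exit(void)
{
	unregister_filesystem(&myfs_type);
}

module_init(myfs_init);
module_exit(myfs_exit);
MODULE_LICENSE("GPL");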
What is the point of keeping the super_blocks linked together like this? For example, when we need to control the page cache: if we want to control the cache of one particular fs_type, the best approach is to find that file_system_type on file_systems, then use hlist_for_each_entry to walk its fs_supers members, and then iterate over those super_blocks to control the memory held by their inodes, as sketched below. If we used iterate_supers directly instead, we would have to walk every superblock in the system and filter on s_type in the callback.
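A minimal sketch of that walk (the helper name drop_fs_type_caches is made up for illustration):

#include <linux/fs.h>
#include <linux/spinlock.h>

/* hypothetical helper: visit only the superblocks of one fs type */
static void drop_fs_type_caches(struct file_system_type *type)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		/* every sb here is an instance of "type"; from it we can
		 * reach sb->s_inodes / sb->s_inode_lru and trim the memory
		 * held by its inodes and page cache.  Real code must also
		 * pin the sb and take sb->s_umount, the way iterate_supers()
		 * does, before touching its inodes. */
	}
	spin_unlock(&sb_lock);
}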
Now let's follow how a superblock gets created at mount time. The call chain (3.10-era) is roughly sys_mount -> do_mount -> do_new_mount -> vfs_kern_mount -> mount_fs -> type->mount(), which for xfs is xfs_fs_mount:
STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
mount_bdev finds the block_device corresponding to the device being mounted (/dev/sdX), then calls the sget function we saw earlier to obtain a super_block. If no match is found on the global list, a new super_block structure is allocated, and the filesystem-specific fill_super callback is invoked to fill in the relevant members of this super_block; for xfs that is xfs_fs_fill_super. For example, the key s_op member is set in this function:
sb->s_fs_info = mp;
...
sb->s_op = &xfs_super_operations;
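For a sense of what s_op wraps, here is a trimmed view of xfs_super_operations from fs/xfs/xfs_super.c (3.10-era; the exact callback set varies by kernel version):

static const struct super_operations xfs_super_operations = {
	.alloc_inode   = xfs_fs_alloc_inode,   /* allocate an xfs inode + its vfs inode */
	.destroy_inode = xfs_fs_destroy_inode,
	.evict_inode   = xfs_fs_evict_inode,
	.put_super     = xfs_fs_put_super,     /* teardown at unmount */
	.sync_fs       = xfs_fs_sync_fs,
	.freeze_fs     = xfs_fs_freeze,
	.unfreeze_fs   = xfs_fs_unfreeze,
	.statfs        = xfs_fs_statfs,        /* backs df */
	.remount_fs    = xfs_fs_remount,
	.show_options  = xfs_fs_show_options,  /* options in /proc/mounts */
	/* ... */
};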
Before the superblock is read in from the physical device, the mount options are parsed; for xfs this is the job of xfs_parseargs. The main steps then are (a condensed outline follows the list below):
xfs_init_mount_workqueues: every xfs device that gets mounted creates quite a few workqueues.
xfs_init_percpu_counters: creates percpu counters for performance reasons.
Creation of the statistics structure, handled by the statement mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
xfs_readsb: with the preparation done, this is the function that actually reads the superblock. It mainly allocates an xfs_buf, builds a bio, reads the block at the specified position, and converts the on-disk superblock structure into the structures needed in memory. The superblock is read from position 0. Be careful to distinguish this 0 from the MBR: here 0 means the 0th block of this partition, whereas the MBR lives outside the partitions; in fact the MBR contains the master boot code and the partition table.
xfs_finish_flags: takes the mount options that xfs_parseargs has finished analyzing and uses them to continue filling in the m_sb member of xfs_mount.
Each mounted xfs device also shows up under /sys/fs/xfs:

[root@localhost xfs]# pwd
/sys/fs/xfs
[root@localhost xfs]# ls
nvme0n1  nvme1n1  nvme2n1  nvme3n1  sda  stats
[root@localhost xfs]#
[root@localhost error]# ls
fail_at_unmount metadata
[root@localhost error]# pwd
/sys/fs/xfs/nvme0n1/error
c. Some checks around whether the mount carries a uuid or not.
d. xfs_set_rw_sizes: sets the default minimum read and write sizes.
e. xfs_rtmount_init: handling for the realtime mount mode, e.g. processing of options like rtdev=device.
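Putting these steps together, here is a condensed, comment-level outline of the flow (reconstructed from the steps described above, not verbatim kernel source; some of the later steps, such as the uuid checks, xfs_set_rw_sizes and xfs_rtmount_init, actually run inside xfs_mountfs, which xfs_fs_fill_super calls; error handling is omitted):

STATIC int
xfs_fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct xfs_mount *mp;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	sb->s_fs_info = mp;                        /* private data = xfs_mount */

	xfs_parseargs(mp, (char *)data);           /* parse mount options */
	xfs_init_mount_workqueues(mp);             /* per-mount workqueues */
	xfs_init_percpu_counters(mp);              /* percpu counters */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);  /* statistics */

	xfs_readsb(mp, silent ? XFS_MFSI_QUIET : 0); /* read on-disk sb: block 0 of the partition */
	xfs_finish_flags(mp);                      /* merge parsed options into mp->m_sb */

	sb->s_op = &xfs_super_operations;          /* hook up the callbacks */

	xfs_mountfs(mp);                           /* runs the later steps: uuid checks,
						    * xfs_set_rw_sizes(), xfs_rtmount_init(), ... */
	return 0;
}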