Linux container technology and LXC (the forerunner of Docker)

A brief history of container technology

OS-level container technology first appeared on FreeBSD under the name jail, shipped with FreeBSD 4.0 in 2000: a process is put into a jail, and no matter what goes wrong inside, it cannot affect the other processes running on the system.

An early Linux incarnation of the jail idea is Linux-VServer, whose isolation is essentially an enhanced chroot.

Linux containers are built on cgroups and namespaces, so understanding Linux containers starts with understanding those two mechanisms.

With namespaces and cgroups in place, Linux has all the prerequisites for container technology; at that point, building a container runtime is a matter of user-space code invoking the right system calls.

Some more useful references
I. Machine-level virtualization (resource/system separation via hardware abstraction - the traditional approach)
1. Xen
2. VMware
3. KVM
II. OS-level container virtualization (process isolation)
1. FreeBSD Jails
https://www.freebsd.org/cgi/man.cgi?query=jail&format=html
http://www.kuqin.com/docs/freebsd-handbook/jails.html
2. OpenVZ
  https://wiki.openvz.org/Main_Page
  http://ftp.sjtu.edu.cn/sites/download.openvz.org/doc/openvz-intro.pdf
3. LXC
https://linuxcontainers.org/lxc/
4. Docker (by now a de facto industry standard)
  early Docker was built on top of LXC
5. rkt (CoreOS)
  CoreOS competes with Docker, and has indeed done quite a bit of good work.

I. What is a Namespace

Linux implemented some namespaces very early on - kernel 2.4 already had the mount namespace. Most namespace support was completed during the 2.6 series: IPC, Network, PID, UTS, and so on. A few are special - work on the User namespace started in 2.6, but it was only declared complete in kernel 3.8.
As Linux itself and container technology keep evolving, new namespaces keep being added; the Cgroup namespace, for example, arrived in kernel 4.6.

Linux exposes the namespace machinery through kernel-level APIs: the clone(), setns() and unshare() system calls. To specify which namespace(s) to isolate, these calls take flags: CLONE_NEWIPC, CLONE_NEWNET, CLONE_NEWNS, CLONE_NEWPID, CLONE_NEWUSER, CLONE_NEWUTS and CLONE_NEWCGROUP; to isolate several namespaces at once, combine the flags with | (bitwise OR). Namespaces can also be inspected and manipulated through files under /proc.
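Before poking at /proc, here is a minimal sketch of the same idea from the command line, using unshare(1) from util-linux (which wraps the unshare()/clone() calls shown above; assumes util-linux >= 2.23):

unshare --uts --pid --fork --mount-proc /bin/bash   # new UTS + PID + mount namespaces
hostname demo-ns    # change the hostname inside the new UTS namespace
hostname            # prints demo-ns - but only in here
ps aux              # with --mount-proc, our bash shows up as PID 1
exit
hostname            # back on the host: unchanged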
Now pick an arbitrary process ID and look at its ns entries:

//inside /proc/2151
[root@ht6 2151]# ls
attr       clear_refs       cpuset   fd       limits     mem         net        oom_score      personality  schedstat  stack   syscall  wchan
autogroup  cmdline          cwd      fdinfo   loginuid   mountinfo   ns         oom_score_adj  projid_map   sessionid  stat    task
auxv       comm             environ  gid_map  map_files  mounts      numa_maps  pagemap        root         setgroups  statm   timers
cgroup     coredump_filter  exe      io       maps       mountstats  oom_adj    patch_state    sched        smaps      status  uid_map
[root@ht6 2151]# ls /proc/2151/ns
ipc  mnt  net  pid  user  uts
//so every created process has namespaces attached: the parent has its namespaces and the child has its own
E:\linux内核\linux-2.6.38.5\linux-2.6.38.5\include\linux\pid.h shows that namespaces are layered and hierarchical
E:\linux内核\linux-2.6.38.5\linux-2.6.38.5\include\linux\pid_namespace.h  ## management of the PID namespace
//cgroups, by contrast, are not created automatically - you create them yourself.
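Each entry under /proc/<pid>/ns is a symlink whose target names the namespace type plus its inode number; two processes are in the same namespace exactly when their links point at the same inode. A quick check (sketch):

readlink /proc/$$/ns/uts /proc/1/ns/uts
# identical output, e.g. uts:[4026531838] twice, means the shell shares PID 1's UTS namespace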

Related code in the linux-2.6.38.5 kernel sources:


E:\linux内核\linux-2.6.38.5\linux-2.6.38.5\include\linux\nsproxy.h

#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
#include <linux/spinlock.h>
#include <linux/sched.h>
struct mnt_namespace;
struct uts_namespace;
struct ipc_namespace;
struct pid_namespace;
struct fs_struct;
/*
 * A structure to contain pointers to all per-process  namespaces - fs (mount), uts, network, sysvipc, etc.
 */
struct nsproxy {
    atomic_t count;
    struct uts_namespace *uts_ns;
    struct ipc_namespace *ipc_ns;
    struct mnt_namespace *mnt_ns;
    struct pid_namespace *pid_ns;
    struct net           *net_ns;
};
extern struct nsproxy init_nsproxy;
/* the namespaces access rules  */
static inline struct nsproxy *task_nsproxy(struct task_struct *tsk)
{
    return rcu_dereference(tsk->nsproxy);
}
int  copy_namespaces(unsigned long flags, struct task_struct *tsk);
void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
void free_nsproxy(struct nsproxy *ns);
int  unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, struct fs_struct *);
static inline void put_nsproxy(struct nsproxy *ns)
{
    if (atomic_dec_and_test(&ns->count)) {
        free_nsproxy(ns);
    }
}
static inline void get_nsproxy(struct nsproxy *ns)
{
    atomic_inc(&ns->count);
}
#ifdef CONFIG_CGROUP_NS
int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid);
#else
static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid)
{
    return 0;
}
#endif

#endif
user_namespace - the user namespace
E:\linux内核\linux-2.6.38.5\linux-2.6.38.5\include\linux\user_namespace.h
....
//the first job of a user namespace is to isolate uid/gid
struct user_namespace {
    struct kref kref;
    struct hlist_head uidhash_table[UIDHASH_SZ];
    struct user_struct *creator;
    struct work_struct destroyer;
};
//init_user_ns is the namespace the initial processes are created in -
//it is the only user namespace that exists at kernel boot
//a user namespace is a mechanism for partitioning the init_user_ns UID space:
//it lets the UIDs of containers living in different user namespaces be mapped
//one-to-one onto UIDs of the host
extern struct user_namespace init_user_ns;
//when CLONE_NEWUSER is set, clone() and unshare() call create_user_ns() to create a new user namespace
extern int create_user_ns(struct cred *new);
extern void free_user_ns(struct kref *kref);
static inline struct user_namespace *get_user_ns(struct user_namespace *ns) { ... }
static inline void put_user_ns(struct user_namespace *ns) { ... }
uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid);
gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid);
...

To check whether user namespaces are supported, look for gid_map and uid_map under /proc/<pid>:

[root@ht5 proc]# ls /proc/374
ls: cannot read symbolic link /proc/374/exe: No such file or directory
attr       clear_refs       cpuset   fd       limits     mem         net        oom_score      personality  schedstat  stack   syscall  wchan
autogroup  cmdline          cwd      fdinfo   loginuid   mountinfo   ns         oom_score_adj  projid_map   sessionid  stat    task
auxv       comm             environ  gid_map  map_files  mounts      numa_maps  pagemap        root         setgroups  statm   timers
cgroup     coredump_filter  exe      io       maps       mountstats  oom_adj    patch_state    sched        smaps      status  uid_map

User namespaces can be switched on and off (on RHEL 7 / CentOS 7):

//enable
[root@ht5 proc]# grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
//disable
[root@ht5 proc]# grubby --remove-args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
//a reboot is required afterwards
Reference:
https://github.com/procszoo/procszoo/wiki/How-to-enable-%22user%22-namespace-in-RHEL7-and-CentOS7%3F
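Once enabled (and after the reboot), the uid mapping can be tried directly; a sketch assuming util-linux's -r/--map-root-user option is available (added in util-linux 2.24, so on stock CentOS 7 you may have to write uid_map by hand instead):

unshare --user --map-root-user /bin/bash
id                     # uid=0(root) - but only inside the new user namespace
cat /proc/$$/uid_map   # e.g. "0 1000 1": uid 0 in here maps to uid 1000 on the host
exit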


The task_struct structure
 E:\linux内核\linux-2.6.38.5\linux-2.6.38.5\include\linux\sched.h

To describe and control every running process, the kernel keeps one data structure per process - the process control block. In the Linux kernel that structure is task_struct.
//one process corresponds to one task_struct
around line 1193:
struct task_struct {
    volatile long state;
    void *stack;                  //address of the process's kernel stack
    ...
    /* namespaces */
    struct nsproxy *nsproxy;
    pid_t pid;                    //the process ID
    pid_t tgid;                   //the thread group ID
    /* every process points to one css_set; the css_set stores the cgroup
       information relevant to the process */
    struct css_set *cgroups;
    /* cg_list is an embedded list_head linking all processes attached to the
       same css_set into one list; process-to-css_set is a many-to-one relation */
    struct list_head cg_list;
    ......
};

 
A namespace is the Linux kernel's way of partitioning kernel resources.
Through namespaces, one group of processes sees only the resources assigned to it while another group sees only its own; the two groups are not even aware of each other's existence. Concretely, this is done by placing the relevant resources of one or more processes into the same namespace.
In short: namespaces isolate processes, while CGroups limit their resource usage (and CGroups are not container-only - they apply equally when you want to cap the CPU, memory, etc. of an ordinary process on the system).

A quick rundown of the namespaces:

Name      /proc entry   What it isolates                                                          Flag              Kernel
Mount     mnt           Filesystem mount points: a mount or umount in one namespace               CLONE_NEWNS       2.4.19
                        is invisible in another
UTS       uts           An independent hostname and domainname per namespace                      CLONE_NEWUTS      2.6.19
IPC       ipc           Inter-process communication: message queues, shared memory, semaphores    CLONE_NEWIPC      2.6.19
Network   net           Network resources (each namespace has its own network stack)              CLONE_NEWNET      2.6.29
PID       pid           Process ID sets (hierarchical, with ownership and visibility rules)       CLONE_NEWPID      2.6.24
User      user          Administrative privilege, which splits into uid/gid and capabilities      CLONE_NEWUSER     3.8
Cgroup    cgroup        The cgroup root seen under /sys/fs/cgroup                                 CLONE_NEWCGROUP   4.6

 

List all namespaces currently accessible on this system (or information about a given namespace):

[root@ht6 cgroup]# lsns
        NS TYPE  NPROCS    PID USER COMMAND
4026531836 pid      170      1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531837 user     189      1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531838 uts      170      1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531839 ipc      170      1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531840 mnt      168      1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531956 net      178      1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
//these were created when I started the docker container aozhejin-nginx
4026532571 mnt        9  88208 root nginx: master process nginx -g daemon off
4026532572 uts        9  88208 root nginx: master process nginx -g daemon off
4026532573 ipc        9  88208 root nginx: master process nginx -g daemon off
4026532574 pid        9  88208 root nginx: master process nginx -g daemon off
4026532576 net        1 126042 root /usr/lib/systemd/systemd-machined
4026532641 mnt        1 126042 root /usr/lib/systemd/systemd-machined
//and these when the lxc container mylxc was created
4026532648 mnt       10   1144 root /sbin/init
4026532651 uts       10   1144 root /sbin/init
4026532652 ipc       10   1144 root /sbin/init
4026532653 pid       10   1144 root /sbin/init
4026532655 net       10   1144 root /sbin/init

Inspect the namespaces a given process belongs to:
//the namespace entries of one process

[root@ht6 cgroup]# ls /proc/1144/ns
ipc  mnt  net  pid  user  uts

 [root@ht6 ns]# cd /proc/1144/ns

 [root@ht6 ns]# stat pid
 File: ‘pid’ -> ‘pid:[4026532653]’
 Size: 0 Blocks: 0 IO Block: 1024 symbolic link
 Device: 3h/3d Inode: 11022132 Links: 1
 Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
 Access: 2022-04-27 15:24:10.113754983 +0800
 Modify: 2022-04-27 15:02:31.651815508 +0800
 Change: 2022-04-27 15:02:31.651815508 +0800

 

II. What is a CGroup

CGroup, short for Control Group, is the kernel's mechanism for isolating physical resources; with it you can limit, isolate and account the resource usage of a Linux process or process group.

CGroup defines one subsystem per controllable resource. The typical subsystems are:

cpu subsystem: limits the CPU usage of processes.
cpuacct subsystem: accounts for and reports the CPU usage of processes.
cpuset subsystem: assigns dedicated CPU and memory nodes to processes.
memory subsystem: limits the memory usage of processes.
blkio subsystem: limits the block-device I/O of processes.
devices subsystem: controls which devices processes may access.
net_cls subsystem: tags the network packets of processes in the cgroup so the tc (traffic control) module can act on them.
freezer subsystem: suspends and resumes the processes in a cgroup.
ns subsystem: lets processes in different cgroups use different namespaces.
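A minimal sketch of one subsystem in action - creating a memory cgroup by hand and moving the current shell into it (paths assume the cgroup v1 layout shown below; "demo" is an arbitrary name):

mkdir /sys/fs/cgroup/memory/demo                                     # new cgroup under the memory controller
echo 104857600 > /sys/fs/cgroup/memory/demo/memory.limit_in_bytes   # cap the group at 100 MB
echo $$ > /sys/fs/cgroup/memory/demo/tasks                          # attach the current shell
# every child of this shell inherits the limit; allocations past
# 100 MB are reclaimed or OOM-killed within the group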

 

To see which resources CGroups can limit, go into the mount directory and look at the cgroup subsystems:

[root@ht6 ~]# cd /sys/fs/cgroup/
 [root@ht6 cgroup]# ll
total 0
drwxr-xr-x 7 root root  0 Apr 21 08:57 blkio              //access control over block-device (e.g. disk) I/O
lrwxrwxrwx 1 root root 11 Apr 21 08:57 cpu -> cpu,cpuacct      //CPU scheduling policy for processes in the cgroup
lrwxrwxrwx 1 root root 11 Apr 21 08:57 cpuacct -> cpu,cpuacct  //accounts the CPU usage of processes in the cgroup
drwxr-xr-x 7 root root  0 Apr 21 08:57 cpu,cpuacct //the combined hierarchy the two links above point to
drwxr-xr-x 4 root root  0 Apr 21 08:57 cpuset      //on multi-core machines, binds processes to CPUs and memory nodes (the memory part only matters on NUMA)
drwxr-xr-x 7 root root  0 Apr 21 08:57 devices     //controls access to devices
drwxr-xr-x 4 root root  0 Apr 21 08:57 freezer     //suspends and resumes processes in the cgroup
drwxr-xr-x 4 root root  0 Apr 21 08:57 hugetlb
drwxr-xr-x 7 root root  0 Apr 21 08:57 memory                       //limits the memory usage of processes in the cgroup
lrwxrwxrwx 1 root root 16 Apr 21 08:57 net_cls -> net_cls,net_prio  //classifies network packets generated in the cgroup
drwxr-xr-x 4 root root  0 Apr 21 08:57 net_cls,net_prio
lrwxrwxrwx 1 root root 16 Apr 21 08:57 net_prio -> net_cls,net_prio //sets the priority of network traffic from the cgroup
drwxr-xr-x 4 root root  0 Apr 21 08:57 perf_event
drwxr-xr-x 7 root root  0 Apr 21 08:57 pids   //limits the number of processes
drwxr-xr-x 6 root root  0 Apr 21 08:57 systemd

Install the cgroup management tools to look at the subsystems from another angle:

[root@ht6 ~]#  yum install libcgroup libcgroup-tools
//list the subsystems - see https://linux.die.net/man/1/lssubsys
[root@ht6 ~]# lssubsys
cpuset
cpu,cpuacct
memory
devices
freezer
net_cls,net_prio
blkio
perf_event
hugetlb
pids
The package also installs management tools such as cgcreate, cgset and cgexec; see the sketch below.
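A sketch using those libcgroup tools ("demo" is an arbitrary group name): create a cpu cgroup, cap it at roughly half a core, and run a command inside it:

cgcreate -g cpu:/demo                    # creates /sys/fs/cgroup/cpu/demo
cgset -r cpu.cfs_quota_us=50000 demo     # 50ms of CPU per default 100ms period = ~0.5 core
cgexec -g cpu:demo sha1sum /dev/zero     # this process now runs throttled to ~50% of one CPU
cgdelete cpu:/demo                       # clean up afterwards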


After installing Docker, the /sys/fs/cgroup/memory/docker/ directory carries the various limit knobs applied to the Docker group:
[root@ht5 ~]# cd /sys/fs/cgroup/memory/
[root@ht5 memory]# ls
cgroup.clone_children  memory.failcnt                  memory.kmem.tcp.limit_in_bytes      memory.memsw.limit_in_bytes      memory.soft_limit_in_bytes  system.slice
cgroup.event_control   memory.force_empty              memory.kmem.tcp.max_usage_in_bytes  memory.memsw.max_usage_in_bytes  memory.stat                 tasks
cgroup.procs           memory.kmem.failcnt             memory.kmem.tcp.usage_in_bytes      memory.memsw.usage_in_bytes      memory.swappiness           user.slice
cgroup.sane_behavior   memory.kmem.limit_in_bytes      memory.kmem.usage_in_bytes          memory.move_charge_at_immigrate  memory.usage_in_bytes
docker                 memory.kmem.max_usage_in_bytes  memory.limit_in_bytes               memory.numa_stat                 memory.use_hierarchy
kubepods               memory.kmem.slabinfo            memory.max_usage_in_bytes           memory.oom_control               notify_on_release
kube-proxy             memory.kmem.tcp.failcnt         memory.memsw.failcnt                memory.pressure_level            release_agent

//we can edit these files to adjust the resource limits applied to the Docker group
//if this machine is a cluster node, frequently restarting pods can even push memory over the limit
See: https://www.cnblogs.com/aozhejin/p/16155776.html
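The mapping from Docker flags to these files can be seen directly; a sketch (the image name is reused from later in this article, "memdemo" is an arbitrary container name):

docker run -d --memory 256m --name memdemo nginx:1.13.12-alpine
ID=$(docker inspect -f '{{.Id}}' memdemo)
cat /sys/fs/cgroup/memory/docker/$ID/memory.limit_in_bytes   # 268435456 = 256 MB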

 

Now let's look at the namespaces Docker creates on a Linux host:

[root@ht6 ~]# lsns
        NS TYPE  NPROCS   PID USER COMMAND
4026531836 pid      164     1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531837 user     173     1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531838 uts      164     1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531839 ipc      164     1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531840 mnt      163     1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531956 net      173     1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026532571 mnt        9  1892 root nginx: master process nginx -g daemon off
4026532572 uts        9  1892 root nginx: master process nginx -g daemon off
4026532573 ipc        9  1892 root nginx: master process nginx -g daemon off
4026532574 pid        9  1892 root nginx: master process nginx -g daemon off

 [root@ht6 ~]# docker inspect 59a690d4aa20 | grep Pid
 "Pid": 1892,
 "PidMode": "",
 "PidsLimit": null,

//list the namespaces of a given process, filtered by type

 [root@ht6 ~]# lsns -p 1892 -t net -t mnt
 NS TYPE NPROCS PID USER COMMAND
 4026531956 net 173 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
 4026532571 mnt 9 1892 root nginx: master process nginx -g daemon off 
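The NS column printed by lsns is the namespace inode - the same number that appears in the /proc symlinks - so the two views can be cross-checked:

ls -l /proc/1892/ns
# ... mnt -> mnt:[4026532571]   <- matches the mnt row lsns printed for PID 1892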

 

//next, the cgroup side of the resource isolation (Docker is installed on this host and only one container is running)

[root@ht6 ~]# cd /sys/fs/cgroup/
[root@ht6 cgroup]# ls
blkio cpu cpuacct cpu,cpuacct cpuset devices freezer hugetlb memory net_cls net_cls,net_prio net_prio perf_event pids systemd
[root@ht6 cgroup]# ll
total 0
drwxr-xr-x 5 root root 0 Apr 21 08:57 blkio
lrwxrwxrwx 1 root root 11 Apr 21 08:57 cpu -> cpu,cpuacct
lrwxrwxrwx 1 root root 11 Apr 21 08:57 cpuacct -> cpu,cpuacct
drwxr-xr-x 5 root root 0 Apr 21 08:57 cpu,cpuacct
drwxr-xr-x 3 root root 0 Apr 21 08:57 cpuset
drwxr-xr-x 5 root root 0 Apr 21 08:57 devices
drwxr-xr-x 3 root root 0 Apr 21 08:57 freezer
drwxr-xr-x 3 root root 0 Apr 21 08:57 hugetlb
drwxr-xr-x 5 root root 0 Apr 21 08:57 memory
lrwxrwxrwx 1 root root 16 Apr 21 08:57 net_cls -> net_cls,net_prio
drwxr-xr-x 3 root root 0 Apr 21 08:57 net_cls,net_prio
lrwxrwxrwx 1 root root 16 Apr 21 08:57 net_prio -> net_cls,net_prio
drwxr-xr-x 3 root root 0 Apr 21 08:57 perf_event
drwxr-xr-x 5 root root 0 Apr 21 08:57 pids
drwxr-xr-x 5 root root 0 Apr 21 08:57 systemd
[root@ht6 cgroup]# cd pids
[root@ht6 pids]# ll
total 0
-rw-r--r-- 1 root root 0 Apr 21 08:57 cgroup.clone_children
--w--w--w- 1 root root 0 Apr 21 08:57 cgroup.event_control
-rw-r--r-- 1 root root 0 Apr 21 08:57 cgroup.procs
-r--r--r-- 1 root root 0 Apr 21 08:57 cgroup.sane_behavior
drwxr-xr-x 3 root root 0 Apr 21 08:58 docker
-rw-r--r-- 1 root root 0 Apr 21 08:57 notify_on_release
-r--r--r-- 1 root root 0 Apr 21 08:57 pids.current
-rw-r--r-- 1 root root 0 Apr 21 08:57 release_agent
drwxr-xr-x 69 root root 0 Apr 22 21:42 system.slice
-rw-r--r-- 1 root root 0 Apr 21 08:57 tasks
drwxr-xr-x 2 root root 0 Apr 21 08:57 user.slice
[root@ht6 pids]# cd docker
[root@ht6 docker]# ll
total 0
drwxr-xr-x 2 root root 0 Apr 21 08:58 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344
-rw-r--r-- 1 root root 0 Apr 21 08:58 cgroup.clone_children
--w--w--w- 1 root root 0 Apr 21 08:58 cgroup.event_control
-rw-r--r-- 1 root root 0 Apr 21 08:58 cgroup.procs
-rw-r--r-- 1 root root 0 Apr 21 08:58 notify_on_release
-r--r--r-- 1 root root 0 Apr 21 08:58 pids.current
-rw-r--r-- 1 root root 0 Apr 21 08:58 pids.max
-rw-r--r-- 1 root root 0 Apr 21 08:58 tasks
[root@ht6 docker]# cd 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344
[root@ht6 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344]# ls
cgroup.clone_children cgroup.event_control cgroup.procs notify_on_release pids.current pids.max tasks
[root@ht6 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344]# ll
total 0
-rw-r--r-- 1 root root 0 Apr 21 08:58 cgroup.clone_children
--w--w--w- 1 root root 0 Apr 21 08:58 cgroup.event_control
-rw-r--r-- 1 root root 0 Apr 21 08:58 cgroup.procs
-rw-r--r-- 1 root root 0 Apr 21 08:58 notify_on_release
-r--r--r-- 1 root root 0 Apr 21 08:58 pids.current
-rw-r--r-- 1 root root 0 Apr 21 08:58 pids.max
-rw-r--r-- 1 root root 0 Apr 21 08:58 tasks
[root@ht6 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344]#

[root@ht6 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
59a690d4aa20 nginx:1.13.12-alpine "nginx -g 'daemon of…" 37 hours ago Up 37 hours nginx-proxy
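pids.max in the per-container directory above defaults to "max" (unlimited); writing a number caps how many tasks the container may have - a cheap fork-bomb guard. A sketch:

echo 64 > /sys/fs/cgroup/pids/docker/59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344/pids.max
cat /sys/fs/cgroup/pids/docker/59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344/pids.current
# once pids.current reaches 64, fork()/clone() inside the container fail with EAGAIN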

 

A typical application is using the host's own commands and programs inside another namespace - see the nsenter sketch below.

See https://www.cnblogs.com/aozhejin/p/15923339.html
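The tool for this is nsenter(1) from util-linux: it uses setns() to join the namespaces of a target PID and then runs a host binary there. A sketch against the nginx container (PID 1892, found via docker inspect above):

nsenter -t 1892 -n ip addr               # run the HOST's ip(8) inside the container's net namespace
                                         # (handy when the image ships no ip/netstat at all)
nsenter -t 1892 -m -u -i -n -p /bin/sh   # enter mnt+uts+ipc+net+pid: roughly a docker exec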

//let's verify with the tools; only the nginx container is running - the rest can be seen with docker ps -a.

[root@ht6 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344]# lsns
NS TYPE NPROCS PID USER COMMAND
4026531836 pid 163 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531837 user 172 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531838 uts 163 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531839 ipc 163 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531840 mnt 162 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531856 mnt 1 48 root kdevtmpfs
4026531956 net 172 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026532571 mnt 9 1892 root nginx: master process nginx -g daemon off
4026532572 uts 9 1892 root nginx: master process nginx -g daemon off
4026532573 ipc 9 1892 root nginx: master process nginx -g daemon off
4026532574 pid 9 1892 root nginx: master process nginx -g daemon off

//the container's init process has host PID 1892
[root@ht6 59a690d4aa20bbc5121e08f61b01d12e2c0ec784ad604620705bc8f94c426344]# lsns --task 1892
NS TYPE NPROCS PID USER COMMAND
4026531837 user 172 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026531956 net 172 1 root /usr/lib/systemd/systemd --switched-root --system --deserialize 22
4026532571 mnt 9 1892 root nginx: master process nginx -g daemon off
4026532572 uts 9 1892 root nginx: master process nginx -g daemon off
4026532573 ipc 9 1892 root nginx: master process nginx -g daemon off
4026532574 pid 9 1892 root nginx: master process nginx -g daemon off

//the process belongs to a whole set of namespaces, each scoping its own resources.

 

An application of namespaces/cgroups - LXC

Homepage: https://linuxcontainers.org/lxc/

What's LXC?

LXC is a userspace interface for the Linux kernel containment features. Through a powerful API and simple tools, it lets Linux users easily create and manage system or application containers.

The umbrella project consists of LXD (a client/manager layered on top of LXC), LXC, LXCFS and distrobuilder.

Current LXC uses the following kernel features to contain processes:

  • Kernel namespaces (ipc, uts, mount, pid, network and user)
  • Apparmor and SELinux profiles
  • Seccomp policies
  • Chroots (using pivot_root)
  • Kernel capabilities
  • CGroups (control groups)


Project repository: https://github.com/lxc/lxc - from its English introduction:
 LXC is the well-known and heavily tested low-level Linux container runtime. It is in active development since 2008 and has proven itself in critical production environments world-wide. Some of its core contributors are the same people that helped to implement various well-known containerization features inside the Linux kernel.


 

(Figure: the namespace model - image not reproduced here.)

 

Overview

Container and virtualization tools

linuxcontainers.org is the umbrella project behind LXD, LXC, LXCFS and distrobuilder.

The goal is to offer a distro and vendor neutral environment for the development of Linux container technologies.

Our focus is providing containers and virtual machines that run full Linux systems. While VMs supply a complete environment, system containers offer an environment as close as possible to the one you'd get from a VM, but without the overhead that comes with running a separate kernel and simulating all the hardware.

Early Docker versions were built on the LXC project. Compared with traditional virtualization, its advantages are:
(1) it shares the host's kernel, so the performance overhead is small;
(2) no instruction-level emulation is needed;
(3) no just-in-time compilation is needed;
(4) containers execute instructions natively on the CPU cores, with no special interpretation layer;
(5) it avoids the complexity of paravirtualization and system-call substitution;
(6) lightweight isolation that still provides sharing mechanisms, so container and host can share resources.
In short, Linux Containers are a lightweight virtualization technique.
Linux Containers provide a mechanism for running multiple mutually isolated server containers concurrently on a single controllable host node.
A Linux container is somewhat like chroot in that it provides a virtual environment with its own process and network space, yet it differs from a virtual machine: LXC virtualizes resources at the operating-system level.

 

Note: the kernel and the operating system sit at the same layer, so this is called kernel-level virtualization, also known as OS-level virtualization.
 

Kernel requirements:

LXC runs on any kernel from 2.6.32 onwards. All it requires is a functional C compiler. LXC works on all architectures that provide the necessary kernel features. This includes (but isn't limited to):

  • i686
  • x86_64
  • ppc, ppc64, ppc64le
  • riscv64
  • s390x
  • armv7l, arm64

LXC also supports at least the following C standard libraries:

  • glibc
  • musl
  • bionic (Android's libc)
 

LXC releases

LXC used to be hosted on SourceForge at https://sourceforge.net/projects/lxc/ and has since moved to https://github.com/lxc/lxc.
The old releases are still available there - for example the initial lxc-0.1.0.tar.gz - all written in C:
https://sourceforge.net/projects/lxc/files/lxc/lxc-0.1.0/
https://github.com/lxc/lxc/releases/tag/lxc_0_1_0

The most recent release at the time of writing is lxc-4.0.12.

Download lxc-4.0.12.tar.gz from either
https://linuxcontainers.org/lxc/downloads/
or https://github.com/lxc/lxc/releases/tag/lxc-4.0.12
and you will notice the two tarballs differ noticeably in size.


The LXC project itself is only a user-space toolset for using and managing LXC containers; the real implementation lives in the kernel features described above, which LXC merely integrates. Container-based virtualization traces back to the so-called resource containers and security containers.
For resource management LXC relies on the kernel's cgroup subsystem, a process-group-based resource-management framework that caps the resources a given group of processes may use. For isolation LXC relies on kernel namespaces - concretely, passing the corresponding flags (CLONE_NEWNS, CLONE_NEWPID, and so on) to clone().
 

The LXC architecture

Namespaces isolate the resources: the processes under Guest1 and those under Guest2 are independent of each other, as if they ran on two separate physical machines. The container-management tools are what create and destroy the guests.

(Figure: the LXC architecture - image not reproduced here.)

 

The next comparison (figure not reproduced here) is LXC versus KVM. KVM's advantage is that a single physical machine can run several guest operating systems and isolate applications by giving each its own OS. LXC achieves isolation directly on top of the host OS, which is exactly why it is fast. On the other hand, if one application must run on Windows and another on Linux, only KVM-style virtualization can host both.

Using LXC

Installing LXC
//install the EPEL repository
[root@192 ~]# yum -y install epel-release

//install LXC and its dependencies
[root@192 ~]# yum -y install lxc lxc-templates bridge-utils lxc-libs libcgroup libvirt perl debootstrap

Total 710 kB/s | 22 MB 00:00:31
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : glusterfs-libs-6.0-61.el7.x86_64 1/56
Installing : lxc-libs-1.0.11-2.el7.x86_64 2/56
Installing : unbound-libs-1.6.6-5.el7_8.x86_64 3/56
Installing : boost-iostreams-1.53.0-28.el7.x86_64 4/56
Installing : boost-random-1.53.0-28.el7.x86_64 5/56
Installing : 1:librados2-10.2.5-4.el7.x86_64 6/56
Installing : 1:librbd1-10.2.5-4.el7.x86_64 7/56
Installing : gnutls-dane-3.3.29-9.el7_6.x86_64 8/56
Installing : gnutls-utils-3.3.29-9.el7_6.x86_64 9/56
Installing : glusterfs-client-xlators-6.0-61.el7.x86_64 10/56
Installing : glusterfs-6.0-61.el7.x86_64 11/56
Installing : glusterfs-api-6.0-61.el7.x86_64 12/56
Installing : glusterfs-cli-6.0-61.el7.x86_64 13/56
Installing : iscsi-initiator-utils-6.2.0.874-22.el7_9.x86_64 14/56
Installing : iscsi-initiator-utils-iscsiuio-6.2.0.874-22.el7_9.x86_64 15/56
Installing : libvirt-bash-completion-4.5.0-36.el7_9.5.x86_64 16/56
Installing : libxslt-1.1.28-6.el7.x86_64 17/56
Installing : netcf-libs-0.2.8-4.el7.x86_64 18/56
Installing : dpkg-1.18.25-10.el7.x86_64 19/56
Installing : lua-alt-getopt-0.7.0-4.el7.noarch 20/56
Installing : cyrus-sasl-gssapi-2.1.26-24.el7_9.x86_64 21/56
Installing : libiscsi-1.9.0-7.el7.x86_64 22/56
Installing : gperftools-libs-2.6.1-1.el7.x86_64 23/56
Installing : 10:qemu-img-1.5.3-175.el7_9.5.x86_64 24/56
Installing : numad-0.5-18.20150602git.el7.x86_64 25/56
Installing : lua-filesystem-1.6.2-2.el7.x86_64 26/56
Installing : lua-lxc-1.0.11-2.el7.x86_64 27/56
Installing : radvd-2.17-3.el7.x86_64 28/56
Installing : lzop-1.03-10.el7.x86_64 29/56
Installing : cyrus-sasl-2.1.26-24.el7_9.x86_64 30/56
Installing : libvirt-libs-4.5.0-36.el7_9.5.x86_64 31/56
Installing : libvirt-daemon-4.5.0-36.el7_9.5.x86_64 32/56
Installing : libvirt-daemon-driver-storage-core-4.5.0-36.el7_9.5.x86_64 33/56
Installing : libvirt-daemon-driver-network-4.5.0-36.el7_9.5.x86_64 34/56
Installing : libvirt-daemon-driver-nwfilter-4.5.0-36.el7_9.5.x86_64 35/56
Installing : libvirt-daemon-config-nwfilter-4.5.0-36.el7_9.5.x86_64 36/56
Installing : libvirt-daemon-driver-qemu-4.5.0-36.el7_9.5.x86_64 37/56
Installing : libvirt-daemon-config-network-4.5.0-36.el7_9.5.x86_64 38/56
Installing : libvirt-daemon-driver-storage-gluster-4.5.0-36.el7_9.5.x86_64 39/56
Installing : libvirt-daemon-driver-storage-scsi-4.5.0-36.el7_9.5.x86_64 40/56
Installing : libvirt-daemon-driver-storage-logical-4.5.0-36.el7_9.5.x86_64 41/56
Installing : libvirt-daemon-driver-storage-disk-4.5.0-36.el7_9.5.x86_64 42/56
Installing : libvirt-daemon-driver-storage-iscsi-4.5.0-36.el7_9.5.x86_64 43/56
Installing : libvirt-daemon-driver-storage-rbd-4.5.0-36.el7_9.5.x86_64 44/56
Installing : libvirt-daemon-driver-storage-mpath-4.5.0-36.el7_9.5.x86_64 45/56
Installing : libvirt-daemon-driver-storage-4.5.0-36.el7_9.5.x86_64 46/56
Installing : libvirt-daemon-driver-secret-4.5.0-36.el7_9.5.x86_64 47/56
Installing : libvirt-daemon-driver-nodedev-4.5.0-36.el7_9.5.x86_64 48/56
Installing : libvirt-daemon-driver-interface-4.5.0-36.el7_9.5.x86_64 49/56
Installing : libvirt-client-4.5.0-36.el7_9.5.x86_64 50/56
Installing : fuse-libs-2.9.2-11.el7.x86_64 51/56
Installing : libvirt-daemon-driver-lxc-4.5.0-36.el7_9.5.x86_64 52/56
Installing : libvirt-4.5.0-36.el7_9.5.x86_64 53/56
Installing : lxc-1.0.11-2.el7.x86_64 54/56
Installing : debootstrap-1.0.126-1.nmu1.el7.noarch 55/56
Installing : lxc-templates-1.0.11-2.el7.x86_64 56/56
Verifying : 1:librbd1-10.2.5-4.el7.x86_64 1/56
Verifying : libvirt-daemon-driver-storage-core-4.5.0-36.el7_9.5.x86_64 2/56
Verifying : fuse-libs-2.9.2-11.el7.x86_64 3/56
Verifying : iscsi-initiator-utils-iscsiuio-6.2.0.874-22.el7_9.x86_64 4/56
Verifying : libvirt-daemon-driver-lxc-4.5.0-36.el7_9.5.x86_64 5/56
Verifying : libvirt-daemon-driver-storage-gluster-4.5.0-36.el7_9.5.x86_64 6/56
Verifying : lxc-libs-1.0.11-2.el7.x86_64 7/56
Verifying : iscsi-initiator-utils-6.2.0.874-22.el7_9.x86_64 8/56
Verifying : gnutls-dane-3.3.29-9.el7_6.x86_64 9/56
Verifying : glusterfs-libs-6.0-61.el7.x86_64 10/56
Verifying : cyrus-sasl-2.1.26-24.el7_9.x86_64 11/56
Verifying : debootstrap-1.0.126-1.nmu1.el7.noarch 12/56
Verifying : libvirt-daemon-driver-secret-4.5.0-36.el7_9.5.x86_64 13/56
Verifying : lzop-1.03-10.el7.x86_64 14/56
Verifying : libvirt-daemon-driver-qemu-4.5.0-36.el7_9.5.x86_64 15/56
Verifying : radvd-2.17-3.el7.x86_64 16/56
Verifying : glusterfs-client-xlators-6.0-61.el7.x86_64 17/56
Verifying : libvirt-daemon-driver-storage-scsi-4.5.0-36.el7_9.5.x86_64 18/56
Verifying : lxc-templates-1.0.11-2.el7.x86_64 19/56
Verifying : glusterfs-6.0-61.el7.x86_64 20/56
Verifying : 10:qemu-img-1.5.3-175.el7_9.5.x86_64 21/56
Verifying : lua-filesystem-1.6.2-2.el7.x86_64 22/56
Verifying : libvirt-daemon-driver-storage-logical-4.5.0-36.el7_9.5.x86_64 23/56
Verifying : libvirt-daemon-driver-storage-disk-4.5.0-36.el7_9.5.x86_64 24/56
Verifying : numad-0.5-18.20150602git.el7.x86_64 25/56
Verifying : gperftools-libs-2.6.1-1.el7.x86_64 26/56
Verifying : lua-lxc-1.0.11-2.el7.x86_64 27/56
Verifying : netcf-libs-0.2.8-4.el7.x86_64 28/56
Verifying : boost-random-1.53.0-28.el7.x86_64 29/56
Verifying : libvirt-daemon-driver-storage-iscsi-4.5.0-36.el7_9.5.x86_64 30/56
Verifying : libiscsi-1.9.0-7.el7.x86_64 31/56
Verifying : libvirt-daemon-driver-nwfilter-4.5.0-36.el7_9.5.x86_64 32/56
Verifying : glusterfs-api-6.0-61.el7.x86_64 33/56
Verifying : 1:librados2-10.2.5-4.el7.x86_64 34/56
Verifying : cyrus-sasl-gssapi-2.1.26-24.el7_9.x86_64 35/56
Verifying : boost-iostreams-1.53.0-28.el7.x86_64 36/56
Verifying : lxc-1.0.11-2.el7.x86_64 37/56
Verifying : libvirt-libs-4.5.0-36.el7_9.5.x86_64 38/56
Verifying : libvirt-daemon-driver-network-4.5.0-36.el7_9.5.x86_64 39/56
Verifying : libvirt-daemon-driver-storage-rbd-4.5.0-36.el7_9.5.x86_64 40/56
Verifying : libvirt-daemon-driver-storage-4.5.0-36.el7_9.5.x86_64 41/56
Verifying : libvirt-daemon-4.5.0-36.el7_9.5.x86_64 42/56
Verifying : libvirt-daemon-config-nwfilter-4.5.0-36.el7_9.5.x86_64 43/56
Verifying : libvirt-4.5.0-36.el7_9.5.x86_64 44/56
Verifying : lua-alt-getopt-0.7.0-4.el7.noarch 45/56
Verifying : dpkg-1.18.25-10.el7.x86_64 46/56
Verifying : libxslt-1.1.28-6.el7.x86_64 47/56
Verifying : libvirt-daemon-driver-nodedev-4.5.0-36.el7_9.5.x86_64 48/56
Verifying : unbound-libs-1.6.6-5.el7_8.x86_64 49/56
Verifying : libvirt-daemon-driver-interface-4.5.0-36.el7_9.5.x86_64 50/56
Verifying : libvirt-bash-completion-4.5.0-36.el7_9.5.x86_64 51/56
Verifying : gnutls-utils-3.3.29-9.el7_6.x86_64 52/56
Verifying : libvirt-daemon-driver-storage-mpath-4.5.0-36.el7_9.5.x86_64 53/56
Verifying : glusterfs-cli-6.0-61.el7.x86_64 54/56
Verifying : libvirt-client-4.5.0-36.el7_9.5.x86_64 55/56
Verifying : libvirt-daemon-config-network-4.5.0-36.el7_9.5.x86_64 56/56


Installed:
debootstrap.noarch 0:1.0.126-1.nmu1.el7 libvirt.x86_64 0:4.5.0-36.el7_9.5 lxc.x86_64 0:1.0.11-2.el7 lxc-libs.x86_64 0:1.0.11-2.el7
lxc-templates.x86_64 0:1.0.11-2.el7


Dependency Installed:
boost-iostreams.x86_64 0:1.53.0-28.el7 boost-random.x86_64 0:1.53.0-28.el7
cyrus-sasl.x86_64 0:2.1.26-24.el7_9 cyrus-sasl-gssapi.x86_64 0:2.1.26-24.el7_9
dpkg.x86_64 0:1.18.25-10.el7 fuse-libs.x86_64 0:2.9.2-11.el7
glusterfs.x86_64 0:6.0-61.el7 glusterfs-api.x86_64 0:6.0-61.el7
glusterfs-cli.x86_64 0:6.0-61.el7 glusterfs-client-xlators.x86_64 0:6.0-61.el7
glusterfs-libs.x86_64 0:6.0-61.el7 gnutls-dane.x86_64 0:3.3.29-9.el7_6
gnutls-utils.x86_64 0:3.3.29-9.el7_6 gperftools-libs.x86_64 0:2.6.1-1.el7
iscsi-initiator-utils.x86_64 0:6.2.0.874-22.el7_9 iscsi-initiator-utils-iscsiuio.x86_64 0:6.2.0.874-22.el7_9
libiscsi.x86_64 0:1.9.0-7.el7 librados2.x86_64 1:10.2.5-4.el7
librbd1.x86_64 1:10.2.5-4.el7 libvirt-bash-completion.x86_64 0:4.5.0-36.el7_9.5
libvirt-client.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-config-network.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-config-nwfilter.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-interface.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-lxc.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-network.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-nodedev.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-nwfilter.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-qemu.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-secret.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-storage.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-storage-core.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-storage-disk.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-storage-gluster.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-storage-iscsi.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-storage-logical.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-storage-mpath.x86_64 0:4.5.0-36.el7_9.5
libvirt-daemon-driver-storage-rbd.x86_64 0:4.5.0-36.el7_9.5 libvirt-daemon-driver-storage-scsi.x86_64 0:4.5.0-36.el7_9.5
libvirt-libs.x86_64 0:4.5.0-36.el7_9.5 libxslt.x86_64 0:1.1.28-6.el7
lua-alt-getopt.noarch 0:0.7.0-4.el7 lua-filesystem.x86_64 0:1.6.2-2.el7
lua-lxc.x86_64 0:1.0.11-2.el7 lzop.x86_64 0:1.03-10.el7
netcf-libs.x86_64 0:0.2.8-4.el7 numad.x86_64 0:0.5-18.20150602git.el7
qemu-img.x86_64 10:1.5.3-175.el7_9.5 radvd.x86_64 0:2.17-3.el7
unbound-libs.x86_64 0:1.6.6-5.el7_8


Complete!

//start the services
[root@192 ~]# systemctl start lxc libvirtd
[root@192 ~]# ss -antl 
State            Recv-Q           Send-Q                     Local Address:Port                       Peer Address:Port           Process           
LISTEN           0                128                              0.0.0.0:111                             0.0.0.0:*                                
LISTEN           0                32                         192.168.122.1:53                              0.0.0.0:*                                
LISTEN           0                128                              0.0.0.0:22                              0.0.0.0:*                                
LISTEN           0                128                                 [::]:111                                [::]:*                                
LISTEN           0                128                                 [::]:22                                 [::]:*    

//check that the system meets the container requirements, i.e. that the running kernel supports LXC

[root@ht6 ~]# lxc-checkconfig
Kernel configuration not found at /proc/config.gz; searching...
Kernel configuration found at /boot/config-3.10.0-1160.62.1.el7.x86_64  //the kernel config being checked

The check covers four areas: Namespaces, Control groups, Misc, and Checkpoint/Restore.
1.--- Namespaces ---
Namespaces: enabled
Utsname namespace: enabled
Ipc namespace: enabled
Pid namespace: enabled
User namespace: enabled
Warning: newuidmap is not setuid-root
Warning: newgidmap is not setuid-root
Network namespace: enabled
Multiple /dev/pts instances: enabled

2.--- Control groups ---
Cgroup: enabled
Cgroup clone_children flag: enabled
Cgroup device: enabled
Cgroup sched: enabled
Cgroup cpu account: enabled
Cgroup memory controller: enabled
Cgroup cpuset: enabled

3.--- Misc ---
Veth pair device: enabled
Macvlan: enabled
Vlan: enabled
Bridges: enabled
Advanced netfilter: enabled
CONFIG_NF_NAT_IPV4: enabled
CONFIG_NF_NAT_IPV6: enabled
CONFIG_IP_NF_TARGET_MASQUERADE: enabled
CONFIG_IP6_NF_TARGET_MASQUERADE: enabled
CONFIG_NETFILTER_XT_TARGET_CHECKSUM: enabled

4.--- Checkpoint/Restore ---
checkpoint restore: enabled
CONFIG_FHANDLE: enabled
CONFIG_EVENTFD: enabled
CONFIG_EPOLL: enabled
CONFIG_UNIX_DIAG: enabled
CONFIG_INET_DIAG: enabled
CONFIG_PACKET_DIAG: enabled
CONFIG_NETLINK_DIAG: enabled
File capabilities: enabled

Note : Before booting a new kernel, you can check its configuration
usage : CONFIG=/path/to/config /usr/bin/lxc-checkconfig

lxc-checkconfig is itself a shell script; cat /usr/bin/lxc-checkconfig to see how the check works:

[root@ht6 lxc]# cat /usr/bin/lxc-checkconfig
#!/bin/sh

# Allow environment variables to override config
: ${CONFIG:=/proc/config.gz}
: ${MODNAME:=configs}

CAT="cat"

if [ -t 1 ]; then
    SETCOLOR_SUCCESS="printf \\033[1;32m"
    SETCOLOR_FAILURE="printf \\033[1;31m"
    SETCOLOR_WARNING="printf \\033[1;33m"
    SETCOLOR_NORMAL="printf \\033[0;39m"
else
    SETCOLOR_SUCCESS=":"
    SETCOLOR_FAILURE=":"
    SETCOLOR_WARNING=":"
    SETCOLOR_NORMAL=":"
fi

is_set() {
    $CAT $CONFIG | grep "$1=[y|m]" > /dev/null
    return $?
}

is_enabled() {
    mandatory=$2

    is_set $1
    RES=$?

    if [ $RES -eq 0 ]; then
        $SETCOLOR_SUCCESS && echo "enabled" && $SETCOLOR_NORMAL
    else
        if [ ! -z "$mandatory" ] && [ "$mandatory" = yes ]; then
            $SETCOLOR_FAILURE && echo "required" && $SETCOLOR_NORMAL
        else
            $SETCOLOR_WARNING && echo "missing" && $SETCOLOR_NORMAL
        fi
    fi
}

if [ ! -f $CONFIG ]; then
    echo "Kernel configuration not found at $CONFIG; searching..."
    KVER="`uname -r`"
    HEADERS_CONFIG="/lib/modules/$KVER/build/.config"
    BOOT_CONFIG="/boot/config-$KVER"
    [ -f "${HEADERS_CONFIG}" ] && CONFIG=${HEADERS_CONFIG}
    [ -f "${BOOT_CONFIG}" ] && CONFIG=${BOOT_CONFIG}
    if [ ! -f "$CONFIG" ]; then
        MODULEFILE=$(modinfo -k $KVER -n $MODNAME 2> /dev/null)
        # don't want to modprobe, so give user a hint
        # although scripts/extract-ikconfig could be used to extract contents without loading kernel module
        # http://svn.pld-linux.org/trac/svn/browser/geninitrd/trunk/geninitrd?rev=12696#L327
    fi
    if [ ! -f $CONFIG ]; then
        echo "$(basename $0): unable to retrieve kernel configuration" >&2
        echo >&2
        if [ -f "$MODULEFILE" ]; then
            echo "Try modprobe $MODNAME module, or" >&2
        fi
        echo "Try recompiling with IKCONFIG_PROC, installing the kernel headers," >&2
        echo "or specifying the kernel configuration path with:" >&2
        echo "  CONFIG=<path> $(basename $0)" >&2
        exit 1
    else
        echo "Kernel configuration found at $CONFIG"
    fi
fi

if gunzip -tq < $CONFIG 2>/dev/null; then
    CAT="zcat"
fi

echo "--- Namespaces ---"
echo -n "Namespaces: " && is_enabled CONFIG_NAMESPACES yes
echo -n "Utsname namespace: " && is_enabled CONFIG_UTS_NS
echo -n "Ipc namespace: " && is_enabled CONFIG_IPC_NS yes
echo -n "Pid namespace: " && is_enabled CONFIG_PID_NS yes
echo -n "User namespace: " && is_enabled CONFIG_USER_NS
if is_set CONFIG_USER_NS; then
    if which newuidmap > /dev/null 2>&1; then
        f=`which newuidmap`
        if [ ! -u "${f}" ]; then
            echo "Warning: newuidmap is not setuid-root"
        fi
    else
        echo "newuidmap is not installed"
    fi
    if which newgidmap > /dev/null 2>&1; then
        f=`which newgidmap`
        if [ ! -u "${f}" ]; then
            echo "Warning: newgidmap is not setuid-root"
        fi
    else
        echo "newgidmap is not installed"
    fi
fi
echo -n "Network namespace: " && is_enabled CONFIG_NET_NS
echo -n "Multiple /dev/pts instances: " && is_enabled DEVPTS_MULTIPLE_INSTANCES
echo
echo "--- Control groups ---"

print_cgroups() {
  # print all mountpoints for cgroup filesystems
  awk '$1 !~ /#/ && $3 == mp { print $2; } ; END { exit(0); } '  "mp=$1" "$2" ;
}

CGROUP_MNT_PATH=`print_cgroups cgroup /proc/self/mounts | head -n 1`
KVER_MAJOR=$($CAT $CONFIG | grep '^# Linux.*Kernel Configuration' | \
    sed -r 's/.* ([0-9])\.[0-9]{1,2}\.[0-9]{1,3}.*/\1/')
if [ "$KVER_MAJOR" = "2" ]; then
KVER_MINOR=$($CAT $CONFIG | grep '^# Linux.*Kernel Configuration' | \
    sed -r 's/.* 2.6.([0-9]{2}).*/\1/')
else
KVER_MINOR=$($CAT $CONFIG | grep '^# Linux.*Kernel Configuration' | \
    sed -r 's/.* [0-9]\.([0-9]{1,3})\.[0-9]{1,3}.*/\1/')
fi

echo -n "Cgroup: " && is_enabled CONFIG_CGROUPS yes

if [ -f $CGROUP_MNT_PATH/cgroup.clone_children ]; then
    echo -n "Cgroup clone_children flag: " &&
    $SETCOLOR_SUCCESS && echo "enabled" && $SETCOLOR_NORMAL
else
    echo -n "Cgroup namespace: " && is_enabled CONFIG_CGROUP_NS yes
fi
echo -n "Cgroup device: " && is_enabled CONFIG_CGROUP_DEVICE
echo -n "Cgroup sched: " && is_enabled CONFIG_CGROUP_SCHED
echo -n "Cgroup cpu account: " && is_enabled CONFIG_CGROUP_CPUACCT
echo -n "Cgroup memory controller: "
if ([ $KVER_MAJOR -ge 3 ] && [ $KVER_MINOR -ge 6 ]) || ([ $KVER_MAJOR -gt 3 ]); then
    is_enabled CONFIG_MEMCG
else
    is_enabled CONFIG_CGROUP_MEM_RES_CTLR
fi
is_set CONFIG_SMP && echo -n "Cgroup cpuset: " && is_enabled CONFIG_CPUSETS
echo
echo "--- Misc ---"
echo -n "Veth pair device: " && is_enabled CONFIG_VETH
echo -n "Macvlan: " && is_enabled CONFIG_MACVLAN
echo -n "Vlan: " && is_enabled CONFIG_VLAN_8021Q
echo -n "Bridges: " && is_enabled CONFIG_BRIDGE
echo -n "Advanced netfilter: " && is_enabled CONFIG_NETFILTER_ADVANCED
echo -n "CONFIG_NF_NAT_IPV4: " && is_enabled CONFIG_NF_NAT_IPV4
echo -n "CONFIG_NF_NAT_IPV6: " && is_enabled CONFIG_NF_NAT_IPV6
echo -n "CONFIG_IP_NF_TARGET_MASQUERADE: " && is_enabled CONFIG_IP_NF_TARGET_MASQUERADE
echo -n "CONFIG_IP6_NF_TARGET_MASQUERADE: " && is_enabled CONFIG_IP6_NF_TARGET_MASQUERADE
echo -n "CONFIG_NETFILTER_XT_TARGET_CHECKSUM: " && is_enabled CONFIG_NETFILTER_XT_TARGET_CHECKSUM

echo
echo "--- Checkpoint/Restore ---"
echo -n "checkpoint restore: " && is_enabled CONFIG_CHECKPOINT_RESTORE
echo -n "CONFIG_FHANDLE: " && is_enabled CONFIG_FHANDLE
echo -n "CONFIG_EVENTFD: " && is_enabled CONFIG_EVENTFD
echo -n "CONFIG_EPOLL: " && is_enabled CONFIG_EPOLL
echo -n "CONFIG_UNIX_DIAG: " && is_enabled CONFIG_UNIX_DIAG
echo -n "CONFIG_INET_DIAG: " && is_enabled CONFIG_INET_DIAG
echo -n "CONFIG_PACKET_DIAG: " && is_enabled CONFIG_PACKET_DIAG
echo -n "CONFIG_NETLINK_DIAG: " && is_enabled CONFIG_NETLINK_DIAG

echo -n "File capabilities: " && \
    ( [ "${KVER_MAJOR}" = 2 ] && [ ${KVER_MINOR} -lt 33 ] && \
       is_enabled CONFIG_SECURITY_FILE_CAPABILITIES ) || \
    ( ( [ "${KVER_MAJOR}" = "2" ] && [ ${KVER_MINOR} -gt 32 ] ) || \
         [ ${KVER_MAJOR} -gt 2 ] && $SETCOLOR_SUCCESS && \
         echo "enabled" && $SETCOLOR_NORMAL )

echo
echo "Note : Before booting a new kernel, you can check its configuration"
echo "usage : CONFIG=/path/to/config $0"
echo
Start the service
[root@ht6 lxc]# systemctl start lxc

Inspect the service unit file

[root@ht6 lxc]# cat /usr/lib/systemd/system/lxc.service
[Unit]
Description=LXC Container Initialization and Autoboot Code
After=network-online.target

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/libexec/lxc/lxc-devsetup
ExecStart=/usr/libexec/lxc/lxc-autostart-helper start
ExecStop=/usr/libexec/lxc/lxc-autostart-helper stop
# Environment=BOOTUP=serial
# Environment=CONSOLETYPE=serial
Delegate=yes
StandardOutput=syslog
StandardError=syslog

[Install]
WantedBy=multi-user.target

Check the service status

[root@ht6 lxc]# systemctl status lxc
● lxc.service - LXC Container Initialization and Autoboot Code
   Loaded: loaded (/usr/lib/systemd/system/lxc.service; disabled; vendor preset: disabled)
   Active: active (exited) since Wed 2022-04-27 13:41:59 CST; 6s ago
  Process: 124729 ExecStart=/usr/libexec/lxc/lxc-autostart-helper start (code=exited, status=0/SUCCESS)
  Process: 124722 ExecStartPre=/usr/libexec/lxc/lxc-devsetup (code=exited, status=0/SUCCESS)
 Main PID: 124729 (code=exited, status=0/SUCCESS)
    Tasks: 0
   Memory: 0B
   CGroup: /system.slice/lxc.service

Apr 27 13:41:29 ht6.node systemd[1]: Starting LXC Container Initialization and Autoboot Code...
Apr 27 13:41:29 ht6.node lxc-devsetup[124722]: Creating /dev/.lxc
Apr 27 13:41:29 ht6.node lxc-devsetup[124722]: /dev is devtmpfs   // device nodes are created on devtmpfs
Apr 27 13:41:29 ht6.node lxc-devsetup[124722]: Creating /dev/.lxc/user
Apr 27 13:41:59 ht6.node lxc-autostart-helper[124729]: Starting LXC autoboot containers:  [  OK  ]
Apr 27 13:41:59 ht6.node systemd[1]: Started LXC Container Initialization and Autoboot Code.

List the available container templates

[root@ht6 ~]#  ls /usr/share/lxc/templates/
lxc-alpine    lxc-archlinux  lxc-centos  lxc-debian    lxc-fedora  lxc-openmandriva  lxc-oracle  lxc-sshd    lxc-ubuntu-cloud
lxc-altlinux  lxc-busybox    lxc-cirros  lxc-download  lxc-gentoo  lxc-opensuse      lxc-plamo   lxc-ubuntu

 [root@ht6 bin]# ls lxc-*
 lxc-attach lxc-cgroup lxc-clone lxc-console lxc-destroy lxc-freeze lxc-monitor lxc-start lxc-top lxc-unshare lxc-wait
 lxc-autostart lxc-checkconfig lxc-config lxc-create lxc-execute lxc-info lxc-snapshot lxc-stop lxc-unfreeze lxc-usernsexec

[root@ht6 bin]# man lxc   //the LXC manual pages

Start the virtual network (libvirtd provides the default NAT bridge virbr0)
[root@ht6 lxc]# systemctl start libvirtd
[root@ht6 lxc]# systemctl status libvirtd
● libvirtd.service - Virtualization daemon
   Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)
   Active: active (running) since Wed 2022-04-27 13:58:05 CST; 10s ago
     Docs: man:libvirtd(8)
           https://libvirt.org
 Main PID: 126044 (libvirtd)
    Tasks: 19 (limit: 32768)
   Memory: 13.1M
   CGroup: /system.slice/libvirtd.service
           ├─126044 /usr/sbin/libvirtd
           ├─126120 /usr/sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf --leasefile-ro --dhcp-script=/usr/libexec/libvirt_leaseshelper
           └─126121 /usr/sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf --leasefile-ro --dhcp-script=/usr/libexec/libvirt_leaseshelper

Apr 27 13:58:06 ht6.node dnsmasq[126120]: started, version 2.76 cachesize 150
Apr 27 13:58:06 ht6.node dnsmasq[126120]: compile time options: IPv6 GNU-getopt DBus no-i18n IDN DHCP DHCPv6 no-Lua TFTP no-conntrack ipset auth nettleha...ct inotify
Apr 27 13:58:06 ht6.node dnsmasq-dhcp[126120]: DHCP, IP range 192.168.122.2 -- 192.168.122.254, lease time 1h
Apr 27 13:58:06 ht6.node dnsmasq-dhcp[126120]: DHCP, sockets bound exclusively to interface virbr0
Apr 27 13:58:06 ht6.node dnsmasq[126120]: reading /etc/resolv.conf
Apr 27 13:58:06 ht6.node dnsmasq[126120]: using nameserver 10.128.55.1#53
Apr 27 13:58:06 ht6.node dnsmasq[126120]: using nameserver 114.114.114.114#53
Apr 27 13:58:06 ht6.node dnsmasq[126120]: read /etc/hosts - 24 addresses
Apr 27 13:58:06 ht6.node dnsmasq[126120]: read /var/lib/libvirt/dnsmasq/default.addnhosts - 0 addresses
Apr 27 13:58:06 ht6.node dnsmasq-dhcp[126120]: read /var/lib/libvirt/dnsmasq/default.hostsfile
Hint: Some lines were ellipsized, use -l to show in full.

Create a container with lxc-create; the syntax is: lxc-create -n NAME -t TEMPLATE_NAME
[root@ht6 lxc]# lxc-create -t centos -n mylxc

....... (earlier output omitted)
Installed:
chkconfig.x86_64 0:1.7.6-1.el7 cronie.x86_64 0:1.4.11-24.el7_9 dhclient.x86_64 12:4.2.5-83.el7.centos.1 initscripts.x86_64 0:9.49.53-1.el7_9.1
openssh-clients.x86_64 0:7.4p1-22.el7_9 openssh-server.x86_64 0:7.4p1-22.el7_9 passwd.x86_64 0:0.79-6.el7 policycoreutils.x86_64 0:2.5-34.el7
rootfiles.noarch 0:8.1-11.el7 rsyslog.x86_64 0:8.24.0-57.el7_9.2 vim-minimal.x86_64 2:7.4.629-8.el7_9 yum.noarch 0:3.4.3-168.el7.centos

Dependency Installed:
acl.x86_64 0:2.2.51-15.el7 audit-libs.x86_64 0:2.8.5-4.el7 basesystem.noarch 0:10.0-7.el7.centos
bash.x86_64 0:4.2.46-35.el7_9 bc.x86_64 0:1.06.95-13.el7 bind-export-libs.x86_64 32:9.11.4-26.P2.el7_9.9
binutils.x86_64 0:2.27-44.base.el7_9.1 bzip2-libs.x86_64 0:1.0.6-13.el7 ca-certificates.noarch 0:2021.2.50-72.el7_9
centos-release.x86_64 0:7-9.2009.1.el7.centos coreutils.x86_64 0:8.22-24.el7_9.2 cpio.x86_64 0:2.11-28.el7
cracklib.x86_64 0:2.9.0-11.el7 cracklib-dicts.x86_64 0:2.9.0-11.el7 cronie-noanacron.x86_64 0:1.4.11-24.el7_9
crontabs.noarch 0:1.11-6.20121102git.el7 cryptsetup-libs.x86_64 0:2.0.3-6.el7 curl.x86_64 0:7.29.0-59.el7_9.1
cyrus-sasl-lib.x86_64 0:2.1.26-24.el7_9 dbus.x86_64 1:1.10.24-15.el7 dbus-libs.x86_64 1:1.10.24-15.el7
device-mapper.x86_64 7:1.02.170-6.el7_9.5 device-mapper-libs.x86_64 7:1.02.170-6.el7_9.5 dhcp-common.x86_64 12:4.2.5-83.el7.centos.1
dhcp-libs.x86_64 12:4.2.5-83.el7.centos.1 diffutils.x86_64 0:3.3-5.el7 dracut.x86_64 0:033-572.el7
elfutils-default-yama-scope.noarch 0:0.176-5.el7 elfutils-libelf.x86_64 0:0.176-5.el7 elfutils-libs.x86_64 0:0.176-5.el7
expat.x86_64 0:2.1.0-14.el7_9 file-libs.x86_64 0:5.11-37.el7 filesystem.x86_64 0:3.2-25.el7
findutils.x86_64 1:4.5.11-6.el7 fipscheck.x86_64 0:1.4.1-6.el7 fipscheck-lib.x86_64 0:1.4.1-6.el7
gawk.x86_64 0:4.0.2-4.el7_3.1 gdbm.x86_64 0:1.10-8.el7 glib2.x86_64 0:2.56.1-9.el7_9
glibc.x86_64 0:2.17-325.el7_9 glibc-common.x86_64 0:2.17-325.el7_9 gmp.x86_64 1:6.0.0-15.el7
gnupg2.x86_64 0:2.0.22-5.el7_5 gpgme.x86_64 0:1.3.2-5.el7 grep.x86_64 0:2.20-3.el7
gzip.x86_64 0:1.5-10.el7 hardlink.x86_64 1:1.0-19.el7 hostname.x86_64 0:3.13-3.el7_7.1
info.x86_64 0:5.1-5.el7 iproute.x86_64 0:4.11.0-30.el7 iptables.x86_64 0:1.4.21-35.el7
iputils.x86_64 0:20160308-10.el7 json-c.x86_64 0:0.11-4.el7_0 keyutils-libs.x86_64 0:1.5.8-3.el7
kmod.x86_64 0:20-28.el7 kmod-libs.x86_64 0:20-28.el7 kpartx.x86_64 0:0.4.9-135.el7_9
krb5-libs.x86_64 0:1.15.1-51.el7_9 libacl.x86_64 0:2.2.51-15.el7 libassuan.x86_64 0:2.1.0-3.el7
libattr.x86_64 0:2.4.46-13.el7 libblkid.x86_64 0:2.23.2-65.el7_9.1 libcap.x86_64 0:2.22-11.el7
libcap-ng.x86_64 0:0.7.5-4.el7 libcom_err.x86_64 0:1.42.9-19.el7 libcurl.x86_64 0:7.29.0-59.el7_9.1
libdb.x86_64 0:5.3.21-25.el7 libdb-utils.x86_64 0:5.3.21-25.el7 libedit.x86_64 0:3.0-12.20121213cvs.el7
libestr.x86_64 0:0.1.9-2.el7 libfastjson.x86_64 0:0.99.4-3.el7 libffi.x86_64 0:3.0.13-19.el7
libgcc.x86_64 0:4.8.5-44.el7 libgcrypt.x86_64 0:1.5.3-14.el7 libgpg-error.x86_64 0:1.12-3.el7
libidn.x86_64 0:1.28-4.el7 libmnl.x86_64 0:1.0.3-7.el7 libmount.x86_64 0:2.23.2-65.el7_9.1
libnetfilter_conntrack.x86_64 0:1.0.6-1.el7_3 libnfnetlink.x86_64 0:1.0.1-4.el7 libpwquality.x86_64 0:1.2.3-5.el7
libselinux.x86_64 0:2.5-15.el7 libselinux-utils.x86_64 0:2.5-15.el7 libsemanage.x86_64 0:2.5-14.el7
libsepol.x86_64 0:2.5-10.el7 libsmartcols.x86_64 0:2.23.2-65.el7_9.1 libssh2.x86_64 0:1.8.0-4.el7
libstdc++.x86_64 0:4.8.5-44.el7 libtasn1.x86_64 0:4.10-1.el7 libuser.x86_64 0:0.60-9.el7
libutempter.x86_64 0:1.1.6-4.el7 libuuid.x86_64 0:2.23.2-65.el7_9.1 libverto.x86_64 0:0.2.5-4.el7
libxml2.x86_64 0:2.9.1-6.el7_9.6 logrotate.x86_64 0:3.8.6-19.el7 lua.x86_64 0:5.1.4-15.el7
lz4.x86_64 0:1.8.3-1.el7 ncurses.x86_64 0:5.9-14.20130511.el7_4 ncurses-base.noarch 0:5.9-14.20130511.el7_4
ncurses-libs.x86_64 0:5.9-14.20130511.el7_4 nspr.x86_64 0:4.32.0-1.el7_9 nss.x86_64 0:3.67.0-4.el7_9
nss-pem.x86_64 0:1.0.3-7.el7 nss-softokn.x86_64 0:3.67.0-3.el7_9 nss-softokn-freebl.x86_64 0:3.67.0-3.el7_9
nss-sysinit.x86_64 0:3.67.0-4.el7_9 nss-tools.x86_64 0:3.67.0-4.el7_9 nss-util.x86_64 0:3.67.0-1.el7_9
openldap.x86_64 0:2.4.44-25.el7_9 openssh.x86_64 0:7.4p1-22.el7_9 openssl-libs.x86_64 1:1.0.2k-25.el7_9
p11-kit.x86_64 0:0.23.5-3.el7 p11-kit-trust.x86_64 0:0.23.5-3.el7 pam.x86_64 0:1.1.8-23.el7
pcre.x86_64 0:8.32-17.el7 pinentry.x86_64 0:0.8.1-17.el7 pkgconfig.x86_64 1:0.27.1-4.el7
popt.x86_64 0:1.13-16.el7 procps-ng.x86_64 0:3.3.10-28.el7 pth.x86_64 0:2.0.7-23.el7
pygpgme.x86_64 0:0.3-9.el7 pyliblzma.x86_64 0:0.5.3-11.el7 python.x86_64 0:2.7.5-90.el7
python-iniparse.noarch 0:0.4-9.el7 python-libs.x86_64 0:2.7.5-90.el7 python-pycurl.x86_64 0:7.19.0-19.el7
python-urlgrabber.noarch 0:3.10-10.el7 pyxattr.x86_64 0:0.5.1-5.el7 qrencode-libs.x86_64 0:3.4.1-3.el7
readline.x86_64 0:6.2-11.el7 rpm.x86_64 0:4.11.3-48.el7_9 rpm-build-libs.x86_64 0:4.11.3-48.el7_9
rpm-libs.x86_64 0:4.11.3-48.el7_9 rpm-python.x86_64 0:4.11.3-48.el7_9 sed.x86_64 0:4.2.2-7.el7
setup.noarch 0:2.8.71-11.el7 shadow-utils.x86_64 2:4.6-5.el7 shared-mime-info.x86_64 0:1.8-5.el7
sqlite.x86_64 0:3.7.17-8.el7_7.1 systemd.x86_64 0:219-78.el7_9.5 systemd-libs.x86_64 0:219-78.el7_9.5
sysvinit-tools.x86_64 0:2.88-14.dsf.el7 tar.x86_64 2:1.26-35.el7 tcp_wrappers-libs.x86_64 0:7.6-77.el7
tzdata.noarch 0:2022a-1.el7 ustr.x86_64 0:1.0.4-16.el7 util-linux.x86_64 0:2.23.2-65.el7_9.1
xz.x86_64 0:5.2.2-1.el7 xz-libs.x86_64 0:5.2.2-1.el7 yum-metadata-parser.x86_64 0:1.1.4-10.el7
yum-plugin-fastestmirror.noarch 0:1.1.31-54.el7_8 zlib.x86_64 0:1.2.7-19.el7_9

Complete!
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/blkid’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/blkid’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/console’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/console’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/cryptsetup’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/cryptsetup’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/faillock’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/faillock’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/lock’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/lock’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/log’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/log’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/netreport’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/netreport’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/sepermit’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/sepermit’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/setrans’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/setrans’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/systemd’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/systemd’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/user’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/user’ are the same file
mv: ‘/var/cache/lxc/centos/x86_64/7/partial/var/run/utmp’ and ‘/var/cache/lxc/centos/x86_64/7/partial/run/utmp’ are the same file
rmdir: failed to remove ‘/var/cache/lxc/centos/x86_64/7/partial/var/run’: Not a directory
Download complete.
Copy /var/cache/lxc/centos/x86_64/7/rootfs to /var/lib/lxc/mylxc/rootfs ...
Copying rootfs to /var/lib/lxc/mylxc/rootfs ...
sed: can't read /var/lib/lxc/mylxc/rootfs/etc/init/tty.conf: No such file or directory
Storing root password in '/var/lib/lxc/mylxc/tmp_root_pass'
Expiring password for user root.
passwd: Success
//password set successfully
sed: can't read /var/lib/lxc/mylxc/rootfs/etc/rc.sysinit: No such file or directory
sed: can't read /var/lib/lxc/mylxc/rootfs/etc/rc.d/rc.sysinit: No such file or directory

Container rootfs and config have been created.
Edit the config file to check/enable networking setup.

The temporary root password is stored in:  //the temporary password file shown below

'/var/lib/lxc/mylxc/tmp_root_pass'

The root password is set up as expired and will require it to be changed
at first login, which you should do as soon as possible. If you lose the
root password or wish to change it without starting the container, you
can change it from the host by running the following command (which will
also reset the expired flag):

chroot /var/lib/lxc/mylxc/rootfs passwd //reset the initial password



//read the generated password
[root@ht6 lxc]# cd /var/lib/lxc/mylxc
[root@ht6 mylxc]# ls
config  rootfs  tmp_root_pass

[root@ht6 lxc]# cat /var/lib/lxc/mylxc/tmp_root_pass
Root-mylxc-yR7mg0
 
Change the root password:
[root@ht6 mylxc]# chroot /var/lib/lxc/mylxc/rootfs passwd
Changing password for user root.
New password: 
Retype new password: 
passwd: all authentication tokens updated successfully.
//("aozhejin" was entered as the new password)
Start the container
[root@ht6 mylxc]# lxc-start -n mylxc -d
Check the container's state
[root@ht6 mylxc]# lxc-info --name mylxc
Name:           mylxc
State:          RUNNING
PID:            127337
IP:             192.168.122.113
CPU use:        0.07 seconds
BlkIO use:      0 bytes
Memory use:     1.03 MiB
KMem use:       0 bytes
Link:           vethR7NUKI  //the host-side veth interface; see it with `ip a`
 TX bytes:      1.77 KiB
 RX bytes:      3.16 KiB
 Total bytes:   4.92 KiB
Attach to the container console
[root@ht6 mylxc]# lxc-console -n mylxc

Connected to tty 1
                  Type <Ctrl+a q> to exit the console, <Ctrl+a Ctrl+a> to enter Ctrl+a itself

# press Ctrl+a then q to detach from the container console
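An alternative to the console (and to the SSH login below) is lxc-attach, which joins the container's namespaces directly from the host; a sketch (assumes the kernel's setns() support, which CentOS 7 has):

[root@ht6 mylxc]# lxc-attach -n mylxc -- ps aux   # run a single command inside mylxc
[root@ht6 mylxc]# lxc-attach -n mylxc             # interactive shell inside the container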
Or log into the container over SSH:
[root@ht6 mylxc]# ssh  192.168.122.113
The authenticity of host '192.168.122.113 (192.168.122.113)' can't be established.
ECDSA key fingerprint is SHA256:CKiHCCP4/AYs+T6yCHTXgGWsyi0i1dAFJSHl5F1hYtY.
ECDSA key fingerprint is MD5:01:e4:24:df:da:03:c4:1f:de:95:86:b6:9d:11:cd:01.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.122.113' (ECDSA) to the list of known hosts.
root@192.168.122.113's password: 
[root@mylxc ~]# ls
[root@mylxc ~]# pwd
/root
[root@mylxc ~]# cd /
[root@mylxc /]# ls
bin  boot  dev  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  selinux  srv  sys  tmp  usr  var
[root@mylxc /]# ls
bin  boot  dev  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  selinux  srv  sys  tmp  usr  var
[root@mylxc /]# cd etc
[root@mylxc etc]# ls
adjtime                  csh.login                group        ld.so.cache     nsswitch.conf.bak  python          rwtab.d        sysconfig
aliases                  dbus-1                   group-       ld.so.conf      openldap           rc0.d           sasl2          sysctl.conf
alternatives             default                  gshadow      ld.so.conf.d    opt                rc1.d           securetty      sysctl.d
bash_completion.d        depmod.d                 gshadow-     libaudit.conf   os-release         rc2.d           security       systemd
bashrc                   dhcp                     gss          libuser.conf    pam.d              rc3.d           selinux        system-release
binfmt.d                 DIR_COLORS               host.conf    localtime       passwd             rc4.d           services       system-release-cpe
centos-release           DIR_COLORS.256color      hosts        login.defs      passwd-            rc5.d           sestatus.conf  terminfo
centos-release-upstream  DIR_COLORS.lightbgcolor  hosts.allow  logrotate.conf  pkcs11             rc6.d           shadow         tmpfiles.d
chkconfig.d              dracut.conf              hosts.deny   logrotate.d     pki                rc.d            shadow-        udev
cron.d                   dracut.conf.d            init.d       machine-id      pm                 rc.local        shells         virc
cron.daily               environment              inittab      modprobe.d      popt.d             redhat-release  skel           X11
cron.deny                exports                  inputrc      modules-load.d  ppp                resolv.conf     ssh            xdg
cron.hourly              filesystems              iproute2     motd            prelink.conf.d     rpc             ssl            xinetd.d
cron.monthly             fstab                    issue        mtab            printcap           rpm             statetab       yum
crontab                  gcrypt                   issue.net    NetworkManager  profile            rsyslog.conf    statetab.d     yum.conf
cron.weekly              gnupg                    krb5.conf    networks        profile.d          rsyslog.d       subgid         yum.repos.d
csh.cshrc                GREP_COLORS              krb5.conf.d  nsswitch.conf   protocols          rwtab           subuid         yum.repos.disabled

[root@mylxc /]# exit
logout
Connection to 192.168.122.113 closed.

Check the routing

[root@ht6 mylxc]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
....
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
192.168.122.0   0.0.0.0         255.255.255.0   U     0      0        0 virbr0
[root@ht6 mylxc]# ip a
5: virbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 52:54:00:dd:ea:02 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0 // gateway address; the bridge is created during initialization
   valid_lft forever preferred_lft forever
6: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 53:52:10:dd:ea:02 brd ff:ff:ff:ff:ff:ff
8: vethR7NUKI@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master virbr0 state UP group default qlen 1000
link/ether fe:b6:b4:c5:82:16 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe81::fcb6:b8ff:fec5:8216/64 scope link
    valid_lft forever preferred_lft forever

Look at the LXC root directory

[root@ht6 mylxc]# pwd
/var/lib/lxc/mylxc
[root@ht6 mylxc]# cd ..
[root@ht6 lxc]# ls
 mylxc 
//This is very similar to Docker, which keeps its data under /var/lib/docker; kubelet likewise keeps pod data under /var/lib/kubelet/pods:

[root@ht6 pods]# pwd
/var/lib/kubelet/pods
[root@ht6 pods]# ls
797525d8-4917-11ec-b44a-060eb4000e9d 7e3d6427-1c3d-11ec-9a8a-065c62000e9f 7e40e59a-1c3d-11ec-9a8a-065c62000e9f
7e3d2e97-1c3d-11ec-9a8a-065c62000e9f 7e3f2319-1c3d-11ec-9a8a-065c62000e9f(pod id)

lxc-info command options

lxc-info --name <container-name>
-s                      #show the state
-p                      #show the PID
-i                      #show the IP addresses
-S                      #show usage stats (CPU, block I/O, memory)
-H                      #show raw (non-humanized) values
[root@ht6 pods]# lxc-info -p --name mylxc
PID:            127337
[root@ht6 pods]# lxc-info -s --name mylxc
[root@ht6 pods]# lxc-info -S --name mylxc
CPU use:        0.09 seconds
BlkIO use:      0 bytes
Memory use:     1.03 MiB
KMem use:       0 bytes
Link:           vethR7NUKI
 TX bytes:      27.56 KiB
 RX bytes:      94.09 KiB
 Total bytes:   121.65 KiB
lxc-info  -H --name mylxc 

[root@ht6 pods]# lxc-info  -H --name mylxc 
Name:           mylxc
State:          RUNNING
PID:            127337
IP:             192.168.122.113
CPU use:        86231368
BlkIO use:      0
Memory use:     1077248
KMem use:       0
Link:           vethR7NUKI
 TX bytes:      28217
 RX bytes:      97028
 Total bytes:   125245
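
The raw numbers from -H make lxc-info easy to script against. A minimal sketch (assuming the lxc 1.0 tools used throughout this article; lxc-wait from the same toolset can replace the polling loop):

#!/bin/sh
# Wait until mylxc reports RUNNING, polling once per second.
NAME=mylxc
while [ "$(lxc-info -s --name "$NAME" | awk '{print $2}')" != "RUNNING" ]; do
    sleep 1
done
# Then read the raw memory usage (bytes) for monitoring.
lxc-info -S -H --name "$NAME" | awk '/Memory use/ {print $3}'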

Stop the container
[root@ht6 mylxc]# lxc-stop -n mylxc  //this takes a while... be patient
[root@ht6 pods]# lxc-info --name mylxc
Name: mylxc
State: STOPPED

 [root@ht6 pods]# ping 192.168.122.113  //no longer reachable
 PING 192.168.122.113 (192.168.122.113) 56(84) bytes of data.
 ^C
 --- 192.168.122.113 ping statistics ---
 3 packets transmitted, 0 received, 100% packet loss, time 1999ms

[root@ht6 mylxc]# ip a
5: virbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 52:54:00:dd:ea:02 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0 // gateway
   valid_lft forever preferred_lft forever
6: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 53:52:10:dd:ea:02 brd ff:ff:ff:ff:ff:ff
//the vethR7NUKI@if7 interface has been removed
//but the container itself still exists on disk

 [root@ht6 mylxc]# pwd
 /var/lib/lxc/mylxc
 [root@ht6 mylxc]# ls
 config rootfs rootfs.dev tmp_root_pass
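
lxc-stop asks the container's init for a clean shutdown, which is why it took a moment above. A hedged sketch of the common variants (-t, -k and -r are in the lxc 1.0 CLI; confirm with lxc-stop --help on your build):

lxc-stop -n mylxc -t 30   # request shutdown, wait at most 30 seconds
lxc-stop -n mylxc -k      # kill all container processes immediately, no clean shutdown
lxc-stop -n mylxc -r      # reboot the container instead of stopping it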

Monitor container resource usage with lxc-top

[root@ht6 bin]# ls lxc-*
lxc-attach lxc-cgroup lxc-clone lxc-console lxc-destroy lxc-freeze lxc-monitor lxc-start lxc-top lxc-unshare lxc-wait
lxc-autostart lxc-checkconfig lxc-config lxc-create lxc-execute lxc-info lxc-snapshot lxc-stop lxc-unfreeze lxc-usernsexec

[root@ht6 bin]# lxc-top  //opens a top-style view (no container is running at the moment)
Container            CPU      CPU      CPU      BlkIO        Mem
Name                Used      Sys     User      Total       Used
TOTAL (0 )          0.00     0.00     0.00    0.00       0.00   ^C
/usr/bin/lua: /usr/bin/lxc-top:242: interrupted!
stack traceback:
    [C]: in function 'usleep'
    /usr/bin/lxc-top:242: in main chunk
    [C]: ?

Now start the container again. There is no need for lxc-create this time; the container already exists, so lxc-start alone is enough.

[root@ht6 bin]# lxc-start --name mylxc
systemd 219 running in system mode. (+PAM +AUDIT +SELINUX +IMA -APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 -SECCOMP +BLKID +ELFUTILS +KMOD +IDN)
Detected virtualization lxc.
Detected architecture x86-64.

Welcome to CentOS Linux 7 (Core)!

Running in a container, ignoring fstab device entry for /dev/root.
Cannot add dependency job for unit display-manager.service, ignoring: Unit not found.
[  OK  ] Reached target Swap.
[  OK  ] Started Forward Password Requests to Wall Directory Watch.
[  OK  ] Created slice Root Slice.
[  OK  ] Created slice System Slice.
[  OK  ] Created slice system-getty.slice.
[  OK  ] Created slice User and Session Slice.
[  OK  ] Reached target Slices.
[  OK  ] Listening on /dev/initctl Compatibility Named Pipe.
[  OK  ] Reached target Remote File Systems.
[  OK  ] Started Dispatch Password Requests to Console Directory Watch.
[  OK  ] Reached target Paths.
[  OK  ] Listening on Delayed Shutdown Socket.
[  OK  ] Listening on Journal Socket.
         Mounting POSIX Message Queue File System...
         Starting Journal Service...
         Mounting Huge Pages File System...
         Starting Read and set NIS domainname from /etc/sysconfig/network...
[  OK  ] Reached target Local Encrypted Volumes.
         Starting Remount Root and Kernel File Systems...
[  OK  ] Mounted POSIX Message Queue File System.
[  OK  ] Mounted Huge Pages File System.
[  OK  ] Started Read and set NIS domainname from /etc/sysconfig/network.
[  OK  ] Started Journal Service.
[  OK  ] Started Remount Root and Kernel File Systems.
[  OK  ] Reached target Local File Systems (Pre).
         Starting Flush Journal to Persistent Storage...
         Starting Configure read-only root support...
<46>systemd-journald[15]: Received request to flush runtime journal from PID 1
[  OK  ] Started Flush Journal to Persistent Storage.
[  OK  ] Started Configure read-only root support.
[  OK  ] Reached target Local File Systems.
         Starting Create Volatile Files and Directories...
         Starting Load/Save Random Seed...
[  OK  ] Started Load/Save Random Seed.
[  OK  ] Started Create Volatile Files and Directories.
         Starting Update UTMP about System Boot/Shutdown...
[  OK  ] Started Update UTMP about System Boot/Shutdown.
[  OK  ] Reached target System Initialization.
[  OK  ] Listening on D-Bus System Message Bus Socket.
[  OK  ] Reached target Sockets.
[  OK  ] Started Daily Cleanup of Temporary Directories.
[  OK  ] Reached target Timers.
[  OK  ] Reached target Basic System.
         Starting Login Service...
         Starting Permit User Sessions...
[  OK  ] Started D-Bus System Message Bus.
         Starting LSB: Bring up/down networking...
[  OK  ] Started Permit User Sessions.
         Starting Cleanup of Temporary Directories...
[  OK  ] Started Command Scheduler.
[  OK  ] Started Console Getty.
[  OK  ] Reached target Login Prompts.
[  OK  ] Started Login Service.
[  OK  ] Started Cleanup of Temporary Directories.

CentOS Linux 7 (Core)
Kernel 3.10.0-1160.62.1.el7.x86_64 on an x86_64

mylxc login: <28>systemd-sysctl[269]: Failed to write '1' to '/proc/sys/fs/protected_symlinks': Read-only file system
At this point, press Enter.

CentOS Linux 7 (Core)
Kernel 3.10.0-1160.62.1.el7.x86_64 on an x86_64

mylxc login: root //type root
Password:  //enter the password aozhejin
Last login: Wed Apr 27 06:15:41 from gateway

//check the IP configuration inside the container

[root@mylxc ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
9: eth0@if10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether fe:14:93:bc:70:2d brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 192.168.122.113/24 brd 192.168.122.255 scope global dynamic eth0
valid_lft 3084sec preferred_lft 3084sec
inet6 fe80::fc14:93ff:febc:702d/64 scope link
valid_lft forever preferred_lft forever

 

//the foreground start above occupies the whole terminal, so open another terminal and log into the ht6 host
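
Alternatively, lxc-start can put the container straight into the background; a small sketch (-d, -o and -l are standard lxc-start options, and lxc-console appears in the tool list above):

lxc-start -n mylxc -d                              # start detached (daemonized)
lxc-start -n mylxc -d -o /tmp/mylxc.log -l DEBUG   # same, with a debug log file
lxc-console -n mylxc                               # attach a console later; Ctrl-a q detaches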

[root@ht6 ~]# lxc-info --name mylxc
Name: mylxc
State: RUNNING
PID: 1144
IP: 192.168.122.113
CPU use: 0.09 seconds
BlkIO use: 0 bytes
Memory use: 1.07 MiB
KMem use: 0 bytes
Link: vethI8CUYK
TX bytes: 1.43 KiB
RX bytes: 12.90 KiB
Total bytes: 14.33 KiB
[root@ht6 ~]# ping 192.168.122.113
PING 192.168.122.113 (192.168.122.113) 56(84) bytes of data.
64 bytes from 192.168.122.113: icmp_seq=1 ttl=64 time=0.039 ms
64 bytes from 192.168.122.113: icmp_seq=2 ttl=64 time=0.027 ms
^C
--- 192.168.122.113 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.027/0.033/0.039/0.006 ms

[root@ht6 ~]# lxc-top

[root@ht6 mylxc]# ip a
5: virbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 52:54:00:dd:ea:02 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0 // bridge
   valid_lft forever preferred_lft forever
6: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 53:52:10:dd:ea:02 brd ff:ff:ff:ff:ff:ff
//interfaces 5 and 6 are untouched...
8: vethI8CUYK@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master virbr0 state UP group default qlen 1000
link/ether fe:b6:b4:c5:82:16 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe81::fcb6:b8ff:fec5:8216/64 scope link
    valid_lft forever preferred_lft forever //the first boot used vethR7NUKI@if7; the restart deleted that veth and created a new one (vethI8CUYK@if9)

  So a bridge can come from several places: the brctl tool, docker, lxc and so on, each creating its own; a hand-rolled example follows.
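
For comparison, this is roughly what building a bridge by hand looks like with brctl and ip (a sketch; br0 is a placeholder name and the address is an example):

brctl addbr br0                        # create an empty bridge
ip addr add 192.168.50.1/24 dev br0    # give it an address (it becomes the gateway)
ip link set br0 up                     # bring the bridge up
brctl show                             # verify it appears alongside docker0/virbr0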

Check the bridge status

[root@ht6 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
docker0        8000.02424e61e204    no        
virbr0         8000.525400ddea02    yes          vethI8CUYK  //cf. the ip a output above
                                         virbr0-nic

Now look at the namespaces from two angles: inside the container and from the host

//inside the container
[root@mylxc ~]# lsns
        NS TYPE  NPROCS PID USER COMMAND
4026531837 user      11   1 root /sbin/init
4026532648 mnt       11   1 root /sbin/init
4026532651 uts       11   1 root /sbin/init
4026532652 ipc       11   1 root /sbin/init
4026532653 pid       11   1 root /sbin/init
4026532655 net       11   1 root /sbin/init
//on the host
[root@ht6 ~]# lsns
        NS TYPE  NPROCS    PID USER COMMAND
....
4026531837 user     190      1 root /usr/lib/systemd/systemd --
4026532648 mnt       10   1144 root /sbin/init
4026532651 uts       10   1144 root /sbin/init
4026532652 ipc       10   1144 root /sbin/init
4026532653 pid       10   1144 root /sbin/init
4026532655 net       10   1144 root /sbin/init
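
Note that the user namespace id (4026531837) is the same on both sides: this privileged container shares the host's user namespace, while mnt, uts, ipc, pid and net are private. The same ids can be read straight from /proc; a quick sketch (1144 is the container's init PID from lxc-info above):

ls -l /proc/1144/ns                         # namespace symlinks of the container's init
readlink /proc/1144/ns/net /proc/$$/ns/net  # different inodes => different net namespaces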

 Inspect the cgroup information

[root@ht6 /]# cd /sys/fs/cgroup/pids
[root@ht6 pids]# ll
total 0
-rw-r--r--  1 root root 0 Apr 21 08:57 cgroup.clone_children
--w--w--w-  1 root root 0 Apr 21 08:57 cgroup.event_control
-rw-r--r--  1 root root 0 Apr 21 08:57 cgroup.procs
-r--r--r--  1 root root 0 Apr 21 08:57 cgroup.sane_behavior
drwxr-xr-x  3 root root 0 Apr 25 22:56 docker
drwxr-xr-x  3 root root 0 Apr 27 14:55 lxc   //lxc containers
drwxr-xr-x  2 root root 0 Apr 27 13:58 machine.slice
-rw-r--r--  1 root root 0 Apr 21 08:57 notify_on_release
-r--r--r--  1 root root 0 Apr 21 08:57 pids.current
-rw-r--r--  1 root root 0 Apr 21 08:57 release_agent
drwxr-xr-x 72 root root 0 Apr 27 15:46 system.slice
-rw-r--r--  1 root root 0 Apr 21 08:57 tasks
drwxr-xr-x  2 root root 0 Apr 21 08:57 user.slice

[root@ht6 pids]# cd lxc
[root@ht6 lxc]# ll
total 0
-rw-r--r-- 1 root root 0 Apr 27 14:07 cgroup.clone_children
--w--w--w- 1 root root 0 Apr 27 14:07 cgroup.event_control
-rw-r--r-- 1 root root 0 Apr 27 14:07 cgroup.procs
drwxr-xr-x 2 root root 0 Apr 27 15:02 mylxc
-rw-r--r-- 1 root root 0 Apr 27 14:07 notify_on_release
-r--r--r-- 1 root root 0 Apr 27 14:07 pids.current
-rw-r--r-- 1 root root 0 Apr 27 14:07 pids.max
-rw-r--r-- 1 root root 0 Apr 27 14:07 tasks
[root@ht6 lxc]# cd mylxc/
[root@ht6 mylxc]# ll
total 0
-rw-r--r-- 1 root root 0 Apr 27 15:02 cgroup.clone_children
--w--w--w- 1 root root 0 Apr 27 15:02 cgroup.event_control
-rw-r--r-- 1 root root 0 Apr 27 15:02 cgroup.procs
-rw-r--r-- 1 root root 0 Apr 27 15:02 notify_on_release
-r--r--r-- 1 root root 0 Apr 27 15:02 pids.current
-rw-r--r-- 1 root root 0 Apr 27 15:02 pids.max
-rw-r--r-- 1 root root 0 Apr 27 15:02 tasks
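
These per-container files are writable, so the container's process count can be capped right here; a sketch (64 is an arbitrary example limit):

echo 64 > /sys/fs/cgroup/pids/lxc/mylxc/pids.max    # cap mylxc at 64 tasks
cat /sys/fs/cgroup/pids/lxc/mylxc/pids.current      # how many tasks it is using now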
[root@ht6 mylxc]# lxc-cgroup --version
1.0.11
 
[root@ht6 ~]# cat /proc/1144/cgroup
11:pids:/lxc/mylxc
10:memory:/lxc/mylxc
9:cpuset:/lxc/mylxc
8:blkio:/lxc/mylxc
7:perf_event:/lxc/mylxc
6:hugetlb:/lxc/mylxc
5:devices:/lxc/mylxc
4:cpuacct,cpu:/lxc/mylxc
3:net_prio,net_cls:/lxc/mylxc
2:freezer:/lxc/mylxc
1:name=systemd:/user.slice/user-0.slice/session-1057.scope
  
[root@ht6 ~]# cat /proc/cgroups
#subsys_name    hierarchy    num_cgroups    enabled
cpuset          9            5              1
cpu             4            78             1
cpuacct         4            78             1
memory          10           78             1
devices         5            78             1
freezer         2            5              1
net_cls         3            5              1
blkio           8            78             1
perf_event      7            5              1
hugetlb         6            5              1
pids            11           78             1
net_prio        3            5              1
  
  [root@ht6 mylxc]# lxc-cgroup --help
  Usage: lxc-cgroup --name=NAME state-object [value]
  Get or set the value of a state object (for example, 'cpuset.cpus')
  in the container's cgroup for the corresponding subsystem.
  Options :
  -n, --name=NAME      NAME of the container
  Common options :
  -o, --logfile=FILE               Output log to FILE instead of stderr
  -l, --logpriority=LEVEL          Set log priority to LEVEL
  -q, --quiet                      Don't produce any output
  -P, --lxcpath=PATH               Use specified container path
  -?, --help                       Give this help list  
      --usage                      Give a short usage message
      --version                    Print the version number
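
Putting lxc-cgroup to use, a short sketch of reading and writing cgroup values without touching /sys/fs/cgroup directly (cpuset.cpus and memory.limit_in_bytes are standard cgroup v1 controls; the values are examples):

lxc-cgroup -n mylxc cpuset.cpus                       # read: CPUs the container may use
lxc-cgroup -n mylxc cpuset.cpus 0,1                   # write: pin it to CPUs 0 and 1
lxc-cgroup -n mylxc memory.limit_in_bytes 268435456   # cap memory at 256 MiB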
 

The container configuration file /var/lib/lxc/<container-name>/config can be edited by hand.

[root@ht6 mylxc]# pwd
/var/lib/lxc/mylxc
[root@ht6 mylxc]# ll
total 12
-rw-r--r--  1 root root 1017 Apr 27 14:00 config
dr-xr-xr-x 18 root root 4096 Apr 27 15:02 rootfs
lrwxrwxrwx  1 root root   32 Apr 27 15:02 rootfs.dev -> /dev/.lxc/mylxc.4dc433531108bf99
-rw-------  1 root root   18 Apr 27 14:00 tmp_root_pass

Every LXC container has a configuration file, which can be specified when running lxc-create, lxc-execute or lxc-start. For a container created with lxc-create, the configuration is saved at /var/lib/lxc/container-name/config. It covers the hostname, networking, cgroup settings and more.

Each line of an LXC config file holds one option in key = value form; comments start with #.

Let's look at the mylxc container's configuration file, /var/lib/lxc/mylxc/config:

[root@ht6 mylxc]# cat config
# Template used to create this container: /usr/share/lxc/templates/lxc-centos
# Parameters passed to the template:
# For additional config options, please look at lxc.container.conf(5)
lxc.network.type = veth  //veth pair virtual networking
lxc.network.flags = up   //interface state
lxc.network.link = virbr0
lxc.network.hwaddr = fe:14:93:bc:70:2d
lxc.rootfs = /var/lib/lxc/mylxc/rootfs

# Include common configuration
lxc.include = /usr/share/lxc/config/centos.common.conf

lxc.arch = x86_64
lxc.utsname = mylxc.node

lxc.autodev = 1

# When using LXC with apparmor, uncomment the next line to run unconfined:
#lxc.aa_profile = unconfined

# example simple networking setup, uncomment to enable  (sample network config; leave it disabled here)
#lxc.network.type = veth  //docker uses the veth type as well
#lxc.network.flags = up   //interface state
#lxc.network.link = lxcbr0
#lxc.network.name = eth0
# Additional example for veth network type
#    static MAC address,
#lxc.network.hwaddr = 00:16:3e:77:52:20
#    persistent veth device name on host side
#        Note: This may potentially collide with other containers of same name!
#lxc.network.veth.pair = v-mylxc-e0  //only meaningful for the veth type

A few words on configuring the container's virtual network.
lxc.network.type: which virtual network technology the container uses. The possible values (the enum in conf.h in the source):
empty (LXC_NET_EMPTY): create a network with only the loopback interface.
veth (LXC_NET_VETH): 1. create a veth pair, one end assigned to the container and the other attached to the bridge named by lxc.network.link.
2. If no bridge is specified, the host-side device is created but not attached to any bridge.
3. By default lxc chooses a name for the host-side device, which can be overridden with lxc.network.veth.pair.
vlan (LXC_NET_VLAN): create a vlan interface on the NIC named by lxc.network.link and assign it to the container;
the vlan id is set with lxc.network.vlan.id.
phys (LXC_NET_PHYS): assign the already-existing interface (NIC) named by lxc.network.link to the container.
macvlan (LXC_NET_MACVLAN): create a macvlan interface attached to the interface named by lxc.network.link.
none (LXC_NET_NONE)
maxconftype (LXC_NET_MAXCONFTYPE)

The remaining network keys:
lxc.network.flags: the action to perform on the interface; possible values: up (bring the interface up), down (take it down).
lxc.network.link: the interface (NIC) that carries the real network traffic.
lxc.network.name: the name of the network interface; by default the name is assigned dynamically.
lxc.network.hwaddr: the MAC address of the virtual NIC; assigned automatically by default.
lxc.network.ipv4: the IPv4 address assigned to the virtual NIC.
lxc.network.ipv6: the IPv6 address assigned to the virtual NIC.
A combined sketch follows.
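
Combining these keys, a static-network sketch for a config like mylxc's (the addresses and MAC are placeholders; lxc.network.ipv4.gateway also exists in lxc 1.0 for setting the default route):

lxc.network.type = veth
lxc.network.flags = up
lxc.network.link = virbr0
lxc.network.name = eth0
lxc.network.hwaddr = 00:16:3e:77:52:20
lxc.network.ipv4 = 192.168.122.50/24
lxc.network.ipv4.gateway = 192.168.122.1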
How these options are parsed can be seen in src/lxc/config.c of the lxc source (I keep the matching version locally at E:\lxc源码\lxc-lxc-1.0.11\lxc-lxc-1.0.11\src\lxc\config.c).
The matching release is at https://github.com/lxc/lxc/releases/tag/lxc-1.0.11
Besides connecting to the container over SSH by IP, we can also attach with the management tool, which is much faster:
[root@ht6 mylxc]# lxc-attach --name mylxc
[root@mylxc /]# ls
bin  boot  dev  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  selinux  srv  sys  tmp  usr  var
  [root@mylxc /]# ip a
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
  9: eth0@if10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether fe:14:93:bc:70:2d brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.122.113/24 brd 192.168.122.255 scope global dynamic eth0
       valid_lft 2674sec preferred_lft 2674sec
    inet6 fe80::fc14:93ff:febc:702d/64 scope link 
       valid_lft forever preferred_lft forever
  [root@mylxc /]# cat /etc/resolv.conf 
  ; generated by /usr/sbin/dhclient-script
  nameserver 192.168.122.1  //DNS server address
   [root@mylxc /]# ps -ef
  UID         PID   PPID  C STIME TTY          TIME CMD
  root          1      0  0 07:02 ?        00:00:00 /sbin/init
  root         29      1  0 07:02 ?        00:00:00 /usr/lib/systemd/systemd-logind
  dbus         31      1  0 07:02 ?        00:00:00 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation
  root         34      1  0 07:02 ?        00:00:00 /usr/sbin/crond -n
  root        210      1  0 07:02 ?        00:00:00 /sbin/dhclient -1 -q -lf /var/lib/dhclient/dhclient--eth0.lease -pf /var/run/dhclient-eth0.pid -H mylxc.node eth0
  root        271      1  0 07:02 ?        00:00:00 /usr/sbin/sshd -D
  root        272      1  0 07:02 ?        00:00:00 /usr/sbin/rsyslogd -n
  root        300      1  0 07:08 ?        00:00:00 login -- root  
  root        301      1  0 07:10 ?        00:00:00 /usr/lib/systemd/systemd-journald
  root        302    300  0 07:10 lxc/console 00:00:00 -bash
  root        364      0  0 08:05 ?        00:00:00 /bin/bash
  root        382    364  0 08:07 ?        00:00:00 ps -ef
 
  [root@mylxc /]# cd /etc/sysconfig/network-scripts/
  [root@mylxc network-scripts]# ls
  ifcfg-eth0  ifdown-bnep  ifdown-ipv6  ifdown-ppp     ifdown-tunnel  ifup-bnep  ifup-ipv6  ifup-plusb  ifup-routes  ifup-wireless     
  network-functions-ipv6
  ifcfg-lo    ifdown-eth   ifdown-isdn  ifdown-routes  ifup           ifup-eth   ifup-isdn  ifup-post   ifup-sit     init.ipv6-global
  ifdown      ifdown-ippp  ifdown-post  ifdown-sit     ifup-aliases   ifup-ippp  ifup-plip  ifup-ppp    ifup-tunnel  network-functions
  [root@mylxc network-scripts]# cat ifcfg-eth0 
  DEVICE=eth0
  BOOTPROTO=dhcp
  ONBOOT=yes
  HOSTNAME=mylxc.node
  NM_CONTROLLED=no
  TYPE=Ethernet
  MTU=
  DHCP_HOSTNAME=`hostname`
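
The interface uses DHCP (BOOTPROTO=dhcp), served on the libvirt 192.168.122.0/24 network. A static variant would be a sketch along these lines (placeholder addresses; standard CentOS network-scripts keys):

DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.122.50
NETMASK=255.255.255.0
GATEWAY=192.168.122.1
DNS1=192.168.122.1
TYPE=Ethernet
NM_CONTROLLED=no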
  //check network connectivity....
    [root@mylxc network-scripts]# ping localhost
PING localhost (127.0.0.1) 56(84) bytes of data.
64 bytes from localhost (127.0.0.1): icmp_seq=1 ttl=64 time=0.031 ms
^C
--- localhost ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.031/0.031/0.031/0.000 ms
[root@mylxc network-scripts]# ping www.baidu.com
PING www.a.shifen.com (180.97.34.96) 56(84) bytes of data.
64 bytes from 180.97.34.96 (180.97.34.96): icmp_seq=1 ttl=53 time=20.4 ms
64 bytes from 180.97.34.96 (180.97.34.96): icmp_seq=2 ttl=53 time=20.4 ms
64 bytes from 180.97.34.96 (180.97.34.96): icmp_seq=3 ttl=53 time=20.4 ms
^C
--- www.a.shifen.com ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 6052ms
rtt min/avg/max/mdev = 20.422/20.447/20.489/0.029 ms
Search for paths named lxc:
[root@mylxc network-scripts]# find / -name 'lxc' 
/sys/fs/cgroup/memory/lxc
/sys/fs/cgroup/freezer/lxc
/sys/fs/cgroup/hugetlb/lxc
/sys/fs/cgroup/perf_event/lxc
/sys/fs/cgroup/blkio/lxc
/sys/fs/cgroup/pids/lxc
/sys/fs/cgroup/net_cls,net_prio/lxc
/sys/fs/cgroup/cpuset/lxc
/sys/fs/cgroup/cpu,cpuacct/lxc
/sys/fs/cgroup/devices/lxc
/var/cache/lxc
/dev/lxc

   Related reading:
   https://www.cnblogs.com/aozhejin/p/17143435.html
