II. Processes

1. In Linux, a lightweight process is essentially what other systems call a thread; lightweight processes can share resources with one another.

  Process descriptor: task_struct holds all the information related to one process. In effect, one process descriptor represents one process.

  The seven process states: runnable (TASK_RUNNING), interruptible wait (TASK_INTERRUPTIBLE), uninterruptible wait (TASK_UNINTERRUPTIBLE), stopped (TASK_STOPPED), traced (TASK_TRACED), zombie (EXIT_ZOMBIE), and zombie-being-removed (EXIT_DEAD).
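  A sketch of the corresponding constants as they appeared in 2.6-era <linux/sched.h> (values match ULK3; later kernels keep the EXIT_* values in a separate exit_state field):

#define TASK_RUNNING          0   /* runnable or currently running       */
#define TASK_INTERRUPTIBLE    1   /* sleeping; wakes on signal or event  */
#define TASK_UNINTERRUPTIBLE  2   /* sleeping; does not react to signals */
#define TASK_STOPPED          4   /* execution stopped (e.g. SIGSTOP)    */
#define TASK_TRACED           8   /* stopped by a debugger via ptrace    */
/* the two exit states: */
#define EXIT_ZOMBIE          16   /* terminated, awaiting parent's wait  */
#define EXIT_DEAD            32   /* being removed after a wait() call   */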

  Process identifier (PID).

  Process descriptors live in dynamic memory, which makes them easy for the kernel to manage. The kernel stores the thread descriptor (thread_info) together with the kernel-mode process stack in two consecutive page frames (8 KB).

  The thread descriptor and the process descriptor reference each other: each stores a pointer to the other in one of its fields.

  The current macro yields the descriptor of the process running on the current CPU.
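  A minimal sketch of how current was derived on x86 with 8 KB stacks (the ULK3-era implementation; modern kernels use a per-CPU variable instead): masking the low 13 bits of the stack pointer locates thread_info at the bottom of the kernel stack, and its task field points to the descriptor.

static inline struct thread_info *current_thread_info(void)
{
    struct thread_info *ti;
    /* THREAD_SIZE == 8192 here, so this clears the low 13 bits of %esp */
    __asm__("andl %%esp, %0" : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
    return ti;
}
#define current (current_thread_info()->task)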

  Process list: a circular doubly linked list that links together all process descriptors. The head of the list is the init_task descriptor, i.e., the so-called process 0.
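  A hedged sketch of walking that list with the kernel's for_each_process() helper (the header location varies by kernel version; the RCU protection shown is the modern convention):

#include <linux/sched/signal.h>   /* for_each_process(); <linux/sched.h> on older kernels */
#include <linux/printk.h>

/* Walk the task list headed by init_task and log each process. */
static void dump_tasks(void)
{
    struct task_struct *p;

    rcu_read_lock();
    for_each_process(p)
        pr_info("pid=%d comm=%s\n", p->pid, p->comm);
    rcu_read_unlock();
}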

  Linux keeps one run list for each process priority; the prio_array_t data structure holds these lists.
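  The layout of prio_array_t as described for the 2.6 O(1) scheduler (a sketch; later kernels replaced this design with CFS and its red-black tree):

#define MAX_PRIO    140                    /* priority levels 0..139        */
#define BITMAP_SIZE 5                      /* longs needed for 140+ bits on
                                              a 32-bit machine              */

typedef struct prio_array {
    int nr_active;                         /* total runnable tasks          */
    unsigned long bitmap[BITMAP_SIZE];     /* bit set => list is non-empty  */
    struct list_head queue[MAX_PRIO];      /* one run list per priority     */
} prio_array_t;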

  Relationships among processes:

    Kinship fields in the process descriptor:

      real_parent, parent, children, sibling

  To speed up deriving a process-descriptor pointer from a PID, the kernel introduces four hash tables, one per identifier type: PID, TGID, PGID, and SID.

  Linux resolves colliding PIDs by chaining: each table entry heads a doubly linked list of the process descriptors that hash to that bucket.
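  An illustrative userspace sketch of the idea (not the kernel's actual structures; the hash function and types here are invented for clarity): colliding entries are simply chained off their bucket.

#include <stddef.h>

#define PIDHASH_BITS  11
#define PIDHASH_SIZE  (1 << PIDHASH_BITS)

struct task_desc {
    int pid;
    struct task_desc *next;        /* collision chain */
};

static struct task_desc *pid_hash[PIDHASH_SIZE];

static unsigned pid_hashfn(int pid)
{
    /* Knuth-style multiplicative hash, folded to PIDHASH_BITS bits */
    return ((unsigned)pid * 2654435761u) >> (32 - PIDHASH_BITS);
}

static struct task_desc *find_task_by_pid(int pid)
{
    struct task_desc *t;

    for (t = pid_hash[pid_hashfn(pid)]; t; t = t->next)
        if (t->pid == pid)
            return t;
    return NULL;
}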

  Wait queues: when a process must sleep until some event occurs, it puts itself on the appropriate wait queue and waits for the kernel to wake it up.

  Two kinds of sleeping processes: exclusive and nonexclusive.
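  A minimal sketch using the kernel's wait-queue API (the queue and flag names are made up; wait_event_interruptible() enqueues the caller as a nonexclusive sleeper, while the *_exclusive variants mark it exclusive so wake_up() wakes only one such task):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);   /* hypothetical queue */
static int demo_ready;                     /* hypothetical condition flag */

/* Sleeper: blocks (interruptibly) until demo_ready becomes nonzero. */
static int wait_for_ready(void)
{
    return wait_event_interruptible(demo_wq, demo_ready != 0);
}

/* Waker: satisfies the condition, then wakes the sleepers. */
static void mark_ready(void)
{
    demo_ready = 1;
    wake_up_interruptible(&demo_wq);
}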

  Process resource limits (the per-process array of RLIMIT_* values, kept in signal->rlim in 2.6-era kernels).
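  A small userspace probe of those limits via the standard getrlimit() API (RLIMIT_NPROC is chosen just as an example):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;

    /* max number of processes the current user may own */
    if (getrlimit(RLIMIT_NPROC, &rl) == 0)
        printf("RLIMIT_NPROC: cur=%llu max=%llu\n",
               (unsigned long long)rl.rlim_cur,
               (unsigned long long)rl.rlim_max);
    return 0;
}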

2. Process switching

  Hardware context switching and the Task State Segment (TSS).

  Performing the process switch:  http://home.ustc.edu.cn/~hchunhui/linux_sched.html#sec10

  Saving and restoring the values of the FPU, MMX, and XMM registers.
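  For orientation, a heavily simplified sketch of the switch itself, loosely following context_switch() in kernel/sched/core.c (pseudo-kernel code under many assumptions: real code also handles kernel threads borrowing active_mm, lazy FPU/MMX/XMM saving, and locking):

/* Sketch only: switch the address space, then the stack and registers. */
static inline void context_switch_sketch(struct task_struct *prev,
                                         struct task_struct *next)
{
    switch_mm(prev->active_mm, next->mm, next);  /* reloads CR3 on x86 */

    /* Swaps the kernel stack and CPU registers; execution resumes
     * here only when "prev" is eventually scheduled back in. */
    switch_to(prev, next, prev);
}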

3. Creating processes

   When a process is created, the child shares most of its parent's resources.

    Copy-on-write lets parent and child read the same physical pages; only when one of them actually writes to a page does the kernel allocate a new physical page for the writer.
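    A quick userspace demonstration of that behavior (values are arbitrary):

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* After fork() both processes see x == 1 through the same physical
 * page; the child's write triggers copy-on-write, so the parent's
 * copy is unaffected. */
int main(void)
{
    int x = 1;

    if (fork() == 0) {              /* child */
        x = 2;                      /* write fault -> child gets its own page */
        printf("child  x=%d\n", x);
        _exit(0);
    }
    wait(NULL);                     /* parent */
    printf("parent x=%d\n", x);     /* still 1 */
    return 0;
}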

    Lightweight processes let parent and child share many in-kernel data structures, such as the page tables, the open file table, and the signal handlers.

    The vfork() system call creates a child that shares its parent's memory address space and blocks the parent's execution until the child either exits or executes a new program (exec).
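    A small userspace illustration of that behavior (the echo command is arbitrary): the parent cannot resume until the child has exec'ed or exited.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = vfork();

    if (pid == 0) {     /* child: borrows the parent's address space */
        execlp("echo", "echo", "hello from the vfork child", (char *)NULL);
        _exit(127);     /* reached only if exec fails */
    }
    /* parent: was blocked until the exec/_exit above */
    printf("parent resumes; child pid=%d\n", pid);
    return 0;
}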

  The clone(), fork(), and vfork() system calls

    clone(): creates a lightweight process.

        Parameters: fn  ----- the function the child executes; the child terminates when this function returns, and its return value becomes the child's exit code.

           arg -----pointer to the argument passed to fn().

           flags -----the low byte specifies the signal sent to the parent when the child terminates; the remaining bits are the CLONE_* flags.

           child_stack -----the user-mode stack pointer loaded into the child's esp register; the calling process should allocate a new stack for the child.

           tls -----address of the thread-local storage segment data structure (used with CLONE_SETTLS).

           ptid -----address of a user-mode variable of the parent that receives the new child's PID (used with CLONE_PARENT_SETTID).

           ctid -----address of a user-mode variable of the child that holds the child's own PID (used with CLONE_CHILD_SETTID / CLONE_CHILD_CLEARTID).
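     A hedged userspace sketch using the glibc clone() wrapper (stack size and messages are arbitrary): the child shares the parent's address space via CLONE_VM, and the SIGCHLD in the low byte is the termination signal delivered to the parent.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static char msg[] = "hello";        /* arbitrary argument for fn      */

static int child_fn(void *arg)      /* fn: the child's entry point    */
{
    printf("child sees arg: %s\n", (char *)arg);
    return 0;                       /* return value = child exit code */
}

int main(void)
{
    size_t stack_size = 64 * 1024;
    char *stack = malloc(stack_size);   /* caller allocates the stack */
    if (!stack)
        return 1;

    /* Stacks grow down on x86, so pass the top of the allocation. */
    pid_t pid = clone(child_fn, stack + stack_size,
                      CLONE_VM | SIGCHLD, msg);
    if (pid == -1)
        return 1;

    waitpid(pid, NULL, 0);
    free(stack);
    return 0;
}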

     As the following Linux source (a 4.x-era kernel) shows, clone(), fork(), vfork(), kernel_thread(), and do_fork() are all implemented on top of _do_fork(); they differ only in the arguments they pass.

SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
         int __user *, parent_tidptr,
         int __user *, child_tidptr,
         unsigned long, tls)
{
    return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
}

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
    return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
        (unsigned long)arg, NULL, NULL, 0);
}

#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
    return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
#else
    /* can not support in nommu mode */
    return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
    return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
            0, NULL, NULL, 0);
}
#endif

long do_fork(unsigned long clone_flags,
          unsigned long stack_start,
          unsigned long stack_size,
          int __user *parent_tidptr,
          int __user *child_tidptr)
{
    return _do_fork(clone_flags, stack_start, stack_size,
            parent_tidptr, child_tidptr, 0);
}

long _do_fork(unsigned long clone_flags,
          unsigned long stack_start,
          unsigned long stack_size,
          int __user *parent_tidptr,
          int __user *child_tidptr,
          unsigned long tls)
{
    struct completion vfork;
    struct pid *pid;
    struct task_struct *p;
    int trace = 0;
    long nr;

    /*
     * Determine whether and which event to report to ptracer.  When
     * called from kernel_thread or CLONE_UNTRACED is explicitly
     * requested, no event is reported; otherwise, report if the event
     * for the type of forking is enabled.
     */
    if (!(clone_flags & CLONE_UNTRACED)) {
        if (clone_flags & CLONE_VFORK)
            trace = PTRACE_EVENT_VFORK;
        else if ((clone_flags & CSIGNAL) != SIGCHLD)
            trace = PTRACE_EVENT_CLONE;
        else
            trace = PTRACE_EVENT_FORK;

        if (likely(!ptrace_event_enabled(current, trace)))
            trace = 0;
    }

    p = copy_process(clone_flags, stack_start, stack_size,
             child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
    add_latent_entropy();

    if (IS_ERR(p))
        return PTR_ERR(p);

    /*
     * Do this prior waking up the new thread - the thread pointer
     * might get invalid after that point, if the thread exits quickly.
     */
    trace_sched_process_fork(current, p);

    pid = get_task_pid(p, PIDTYPE_PID);
    nr = pid_vnr(pid);

    if (clone_flags & CLONE_PARENT_SETTID)
        put_user(nr, parent_tidptr);

    if (clone_flags & CLONE_VFORK) {
        p->vfork_done = &vfork;
        init_completion(&vfork);
        get_task_struct(p);
    }

    wake_up_new_task(p);

    /* forking complete and child started to run, tell ptracer */
    if (unlikely(trace))
        ptrace_event_pid(trace, pid);

    if (clone_flags & CLONE_VFORK) {
        if (!wait_for_vfork_done(p, &vfork))
            ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
    }

    put_pid(pid);
    return nr;
}

    copy_process() is implemented as shown in the following source code:


/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static __latent_entropy struct task_struct *copy_process(
                    unsigned long clone_flags,
                    unsigned long stack_start,
                    unsigned long stack_size,
                    int __user *child_tidptr,
                    struct pid *pid,
                    int trace,
                    unsigned long tls,
                    int node)
{
    int retval;
    struct task_struct *p;

    /*
     * Don't allow sharing the root directory with processes in a different
     * namespace
     */
    if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
        return ERR_PTR(-EINVAL);

    if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
        return ERR_PTR(-EINVAL);

    /*
     * Thread groups must share signals as well, and detached threads
     * can only be started up within the thread group.
     */
    if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
        return ERR_PTR(-EINVAL);

    /*
     * Shared signal handlers imply shared VM. By way of the above,
     * thread groups also imply shared VM. Blocking this case allows
     * for various simplifications in other code.
     */
    if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
        return ERR_PTR(-EINVAL);

    /*
     * Siblings of global init remain as zombies on exit since they are
     * not reaped by their parent (swapper). To solve this and to avoid
     * multi-rooted process trees, prevent global and container-inits
     * from creating siblings.
     */
    if ((clone_flags & CLONE_PARENT) &&
                current->signal->flags & SIGNAL_UNKILLABLE)
        return ERR_PTR(-EINVAL);

    /*
     * If the new process will be in a different pid or user namespace
     * do not allow it to share a thread group with the forking task.
     */
    if (clone_flags & CLONE_THREAD) {
        if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
            (task_active_pid_ns(current) !=
                current->nsproxy->pid_ns_for_children))
            return ERR_PTR(-EINVAL);
    }

    retval = -ENOMEM;
    p = dup_task_struct(current, node);
    if (!p)
        goto fork_out;

    /*
     * This _must_ happen before we call free_task(), i.e. before we jump
     * to any of the bad_fork_* labels. This is to avoid freeing
     * p->set_child_tid which is (ab)used as a kthread's data pointer for
     * kernel threads (PF_KTHREAD).
     */
    p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
    /*
     * Clear TID on mm_release()?
     */
    p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

    ftrace_graph_init_task(p);

    rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
    DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
    DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
    retval = -EAGAIN;
    if (atomic_read(&p->real_cred->user->processes) >=
            task_rlimit(p, RLIMIT_NPROC)) {
        if (p->real_cred->user != INIT_USER &&
            !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
            goto bad_fork_free;
    }
    current->flags &= ~PF_NPROC_EXCEEDED;

    retval = copy_creds(p, clone_flags);
    if (retval < 0)
        goto bad_fork_free;

    /*
     * If multiple threads are within copy_process(), then this check
     * triggers too late. This doesn't hurt, the check is only there
     * to stop root fork bombs.
     */
    retval = -EAGAIN;
    if (nr_threads >= max_threads)
        goto bad_fork_cleanup_count;

    delayacct_tsk_init(p);    /* Must remain after dup_task_struct() */
    p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
    p->flags |= PF_FORKNOEXEC;
    INIT_LIST_HEAD(&p->children);
    INIT_LIST_HEAD(&p->sibling);
    rcu_copy_process(p);
    p->vfork_done = NULL;
    spin_lock_init(&p->alloc_lock);

    init_sigpending(&p->pending);

    p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
    p->utimescaled = p->stimescaled = 0;
#endif
    prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
    seqcount_init(&p->vtime.seqcount);
    p->vtime.starttime = 0;
    p->vtime.state = VTIME_INACTIVE;
#endif

#if defined(SPLIT_RSS_COUNTING)
    memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

    p->default_timer_slack_ns = current->timer_slack_ns;

    task_io_accounting_init(&p->ioac);
    acct_clear_integrals(p);

    posix_cpu_timers_init(p);

    p->start_time = ktime_get_ns();
    p->real_start_time = ktime_get_boot_ns();
    p->io_context = NULL;
    p->audit_context = NULL;
    cgroup_fork(p);
#ifdef CONFIG_NUMA
    p->mempolicy = mpol_dup(p->mempolicy);
    if (IS_ERR(p->mempolicy)) {
        retval = PTR_ERR(p->mempolicy);
        p->mempolicy = NULL;
        goto bad_fork_cleanup_threadgroup_lock;
    }
#endif
#ifdef CONFIG_CPUSETS
    p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
    p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
    seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    p->irq_events = 0;
    p->hardirqs_enabled = 0;
    p->hardirq_enable_ip = 0;
    p->hardirq_enable_event = 0;
    p->hardirq_disable_ip = _THIS_IP_;
    p->hardirq_disable_event = 0;
    p->softirqs_enabled = 1;
    p->softirq_enable_ip = _THIS_IP_;
    p->softirq_enable_event = 0;
    p->softirq_disable_ip = 0;
    p->softirq_disable_event = 0;
    p->hardirq_context = 0;
    p->softirq_context = 0;
#endif

    p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
    p->lockdep_depth = 0; /* no locks held yet */
    p->curr_chain_key = 0;
    p->lockdep_recursion = 0;
    lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
    p->sequential_io    = 0;
    p->sequential_io_avg    = 0;
#endif

    /* Perform scheduler related setup. Assign this task to a CPU. */
    retval = sched_fork(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_policy;

    retval = perf_event_init_task(p);
    if (retval)
        goto bad_fork_cleanup_policy;
    retval = audit_alloc(p);
    if (retval)
        goto bad_fork_cleanup_perf;
    /* copy all the process information */
    shm_init_task(p);
    retval = security_task_alloc(p, clone_flags);
    if (retval)
        goto bad_fork_cleanup_audit;
    retval = copy_semundo(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_security;
    retval = copy_files(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_semundo;
    retval = copy_fs(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_files;
    retval = copy_sighand(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_fs;
    retval = copy_signal(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_sighand;
    retval = copy_mm(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_signal;
    retval = copy_namespaces(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_mm;
    retval = copy_io(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_namespaces;
    retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
    if (retval)
        goto bad_fork_cleanup_io;

    if (pid != &init_struct_pid) {
        pid = alloc_pid(p->nsproxy->pid_ns_for_children);
        if (IS_ERR(pid)) {
            retval = PTR_ERR(pid);
            goto bad_fork_cleanup_thread;
        }
    }

#ifdef CONFIG_BLOCK
    p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
    p->robust_list = NULL;
#ifdef CONFIG_COMPAT
    p->compat_robust_list = NULL;
#endif
    INIT_LIST_HEAD(&p->pi_state_list);
    p->pi_state_cache = NULL;
#endif
    /*
     * sigaltstack should be cleared when sharing the same VM
     */
    if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
        sas_ss_reset(p);

    /*
     * Syscall tracing and stepping should be turned off in the
     * child regardless of CLONE_PTRACE.
     */
    user_disable_single_step(p);
    clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
    clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
    clear_all_latency_tracing(p);

    /* ok, now we should be set up.. */
    p->pid = pid_nr(pid);
    if (clone_flags & CLONE_THREAD) {
        p->exit_signal = -1;
        p->group_leader = current->group_leader;
        p->tgid = current->tgid;
    } else {
        if (clone_flags & CLONE_PARENT)
            p->exit_signal = current->group_leader->exit_signal;
        else
            p->exit_signal = (clone_flags & CSIGNAL);
        p->group_leader = p;
        p->tgid = p->pid;
    }

    p->nr_dirtied = 0;
    p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
    p->dirty_paused_when = 0;

    p->pdeath_signal = 0;
    INIT_LIST_HEAD(&p->thread_group);
    p->task_works = NULL;

    cgroup_threadgroup_change_begin(current);
    /*
     * Ensure that the cgroup subsystem policies allow the new process to be
     * forked. It should be noted the the new process's css_set can be changed
     * between here and cgroup_post_fork() if an organisation operation is in
     * progress.
     */
    retval = cgroup_can_fork(p);
    if (retval)
        goto bad_fork_free_pid;

    /*
     * Make it visible to the rest of the system, but dont wake it up yet.
     * Need tasklist lock for parent etc handling!
     */
    write_lock_irq(&tasklist_lock);

    /* CLONE_PARENT re-uses the old parent */
    if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
        p->real_parent = current->real_parent;
        p->parent_exec_id = current->parent_exec_id;
    } else {
        p->real_parent = current;
        p->parent_exec_id = current->self_exec_id;
    }

    klp_copy_process(p);

    spin_lock(&current->sighand->siglock);

    /*
     * Copy seccomp details explicitly here, in case they were changed
     * before holding sighand lock.
     */
    copy_seccomp(p);

    /*
     * Process group and session signals need to be delivered to just the
     * parent before the fork or both the parent and the child after the
     * fork. Restart if a signal comes in before we add the new process to
     * it's process group.
     * A fatal signal pending means that current will exit, so the new
     * thread can't slip out of an OOM kill (or normal SIGKILL).
    */
    recalc_sigpending();
    if (signal_pending(current)) {
        retval = -ERESTARTNOINTR;
        goto bad_fork_cancel_cgroup;
    }
    if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
        retval = -ENOMEM;
        goto bad_fork_cancel_cgroup;
    }

    if (likely(p->pid)) {
        ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

        init_task_pid(p, PIDTYPE_PID, pid);
        if (thread_group_leader(p)) {
            init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
            init_task_pid(p, PIDTYPE_SID, task_session(current));

            if (is_child_reaper(pid)) {
                ns_of_pid(pid)->child_reaper = p;
                p->signal->flags |= SIGNAL_UNKILLABLE;
            }

            p->signal->leader_pid = pid;
            p->signal->tty = tty_kref_get(current->signal->tty);
            /*
             * Inherit has_child_subreaper flag under the same
             * tasklist_lock with adding child to the process tree
             * for propagate_has_child_subreaper optimization.
             */
            p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
                             p->real_parent->signal->is_child_subreaper;
            list_add_tail(&p->sibling, &p->real_parent->children);
            list_add_tail_rcu(&p->tasks, &init_task.tasks);
            attach_pid(p, PIDTYPE_PGID);
            attach_pid(p, PIDTYPE_SID);
            __this_cpu_inc(process_counts);
        } else {
            current->signal->nr_threads++;
            atomic_inc(&current->signal->live);
            atomic_inc(&current->signal->sigcnt);
            list_add_tail_rcu(&p->thread_group,
                      &p->group_leader->thread_group);
            list_add_tail_rcu(&p->thread_node,
                      &p->signal->thread_head);
        }
        attach_pid(p, PIDTYPE_PID);
        nr_threads++;
    }

    total_forks++;
    spin_unlock(&current->sighand->siglock);
    syscall_tracepoint_update(p);
    write_unlock_irq(&tasklist_lock);

    proc_fork_connector(p);
    cgroup_post_fork(p);
    cgroup_threadgroup_change_end(current);
    perf_event_fork(p);

    trace_task_newtask(p, clone_flags);
    uprobe_copy_process(p, clone_flags);

    return p;

bad_fork_cancel_cgroup:
    spin_unlock(&current->sighand->siglock);
    write_unlock_irq(&tasklist_lock);
    cgroup_cancel_fork(p);
bad_fork_free_pid:
    cgroup_threadgroup_change_end(current);
    if (pid != &init_struct_pid)
        free_pid(pid);
bad_fork_cleanup_thread:
    exit_thread(p);
bad_fork_cleanup_io:
    if (p->io_context)
        exit_io_context(p);
bad_fork_cleanup_namespaces:
    exit_task_namespaces(p);
bad_fork_cleanup_mm:
    if (p->mm)
        mmput(p->mm);
bad_fork_cleanup_signal:
    if (!(clone_flags & CLONE_THREAD))
        free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
    __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
    exit_fs(p); /* blocking */
bad_fork_cleanup_files:
    exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
    exit_sem(p);
bad_fork_cleanup_security:
    security_task_free(p);
bad_fork_cleanup_audit:
    audit_free(p);
bad_fork_cleanup_perf:
    perf_event_free_task(p);
bad_fork_cleanup_policy:
    lockdep_free_task(p);
#ifdef CONFIG_NUMA
    mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
    delayacct_tsk_free(p);
bad_fork_cleanup_count:
    atomic_dec(&p->cred->user->processes);
    exit_creds(p);
bad_fork_free:
    p->state = TASK_DEAD;
    put_task_stack(p);
    free_task(p);
fork_out:
    return ERR_PTR(retval);
}


4. Kernel threads

  Differences between kernel threads and ordinary processes:

    Kernel threads run only in Kernel Mode, while ordinary processes run alternately in Kernel Mode and User Mode.

    Because they run only in Kernel Mode, kernel threads use only the linear addresses above PAGE_OFFSET (the 3 GB to 4 GB range on x86-32), whereas an ordinary process can use the full 4 GB of linear address space.

  Creating a kernel thread: call kernel_thread(), which itself boils down to _do_fork(), as shown in the source above: _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, (unsigned long)arg, NULL, NULL, 0)
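  For reference, modern kernel code normally spawns kernel threads through the kthread wrapper API rather than calling kernel_thread() directly; a hedged sketch (the thread function and name are invented):

#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical worker: sleeps until woken, exits when asked to stop. */
static int my_worker_fn(void *data)
{
    while (!kthread_should_stop()) {
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();                 /* real code would do work here */
    }
    return 0;
}

static struct task_struct *start_my_worker(void)
{
    return kthread_run(my_worker_fn, NULL, "my_worker");
}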

  Process 0 -- the ancestor of all processes, the idle process.  Process 1 -- a kernel thread created by process 0 that executes the init() function.

  Other kernel threads: keventd, kapmd, kswapd, pdflush, kblockd, ksoftirqd.

5. Destroying processes

  Process termination:

  System call: exit_group() -----> do_group_exit(): terminates a whole thread group; this is the system call that the C library function exit() invokes.

  System call: exit() -----> do_exit(): terminates a single thread, regardless of the other threads in its thread group.
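  A tiny illustration of the difference using raw syscall numbers via syscall(2) (normally you would just call the C library's exit()):

#include <unistd.h>
#include <sys/syscall.h>

/* SYS_exit_group terminates every thread in the calling thread group
 * (what the C library's exit()/_exit() ultimately invoke), whereas
 * syscall(SYS_exit, 0) would terminate only the calling thread. */
int main(void)
{
    syscall(SYS_exit_group, 0);   /* never returns */
    return 1;                     /* not reached */
}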

6. Process removal

  After a child terminates, its parent must call one of the wait()-family functions to reap the child's resources; only then does the kernel discard the child's process descriptor.
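  A minimal userspace example of reaping a child (the exit code 42 is arbitrary):

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* The child stays a zombie until the parent reaps it with waitpid();
 * only then can the kernel discard its process descriptor. */
int main(void)
{
    pid_t pid = fork();
    if (pid == 0)
        _exit(42);                          /* child terminates */

    int status;                             /* parent reaps */
    if (waitpid(pid, &status, 0) == pid && WIFEXITED(status))
        printf("child %d exited with %d\n", pid, WEXITSTATUS(status));
    return 0;
}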

  Orphan processes: if a parent exits before its child, the child becomes an orphan. The kernel solves this by forcibly reparenting orphans to the init process.

  The release_task() function:

    This function detaches the last data structures from a zombie process's descriptor; it is invoked on a zombie in one of two ways:

        1. Directly from do_exit(), if the parent does not need to be notified with a signal about the child's termination.

        2. From the wait4() or waitpid() system calls, if a signal has already been sent to the parent.


void release_task(struct task_struct *p)
{
    struct task_struct *leader;
    int zap_leader;
repeat:
    /* don't need to get the RCU readlock here - the process is dead and
     * can't be modifying its own credentials. But shut RCU-lockdep up */
    rcu_read_lock();
    atomic_dec(&__task_cred(p)->user->processes);
    rcu_read_unlock();

    proc_flush_task(p);

    write_lock_irq(&tasklist_lock);
    ptrace_release_task(p);
    __exit_signal(p);

    /*
     * If we are the last non-leader member of the thread
     * group, and the leader is zombie, then notify the
     * group leader's parent process. (if it wants notification.)
     */
    zap_leader = 0;
    leader = p->group_leader;
    if (leader != p && thread_group_empty(leader)
            && leader->exit_state == EXIT_ZOMBIE) {
        /*
         * If we were the last child thread and the leader has
         * exited already, and the leader's parent ignores SIGCHLD,
         * then we are the one who should release the leader.
         */
        zap_leader = do_notify_parent(leader, leader->exit_signal);
        if (zap_leader)
            leader->exit_state = EXIT_DEAD;
    }

    write_unlock_irq(&tasklist_lock);
    release_thread(p);
    call_rcu(&p->rcu, delayed_put_task_struct);

    p = leader;
    if (unlikely(zap_leader))
        goto repeat;
}

