/* kernel/sched/core.c */
/*
 * Initialise the scheduler-entity state of a freshly forked task.
 * clone_flags is accepted for interface parity with sched_fork() but is
 * not consulted in this visible portion.
 */
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	/* Not enqueued on any runqueue yet. */
	p->on_rq = 0;
	p->se.on_rq = 0;

	/* Zero every accumulated runtime statistic for the new entity. */
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.nr_migrations		= 0;
	p->se.vruntime			= 0;

	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* cfs_rq is assigned later, when the entity is attached to a group. */
	p->se.cfs_rq = NULL;
#endif
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}
/*
 * Put task @p on runqueue @rq via its scheduling class, refreshing the
 * rq clock first unless the caller asked us not to (ENQUEUE_NOCLOCK).
 */
static inline void
enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	const int skip_clock = flags & ENQUEUE_NOCLOCK;

	if (!skip_clock)
		update_rq_clock(rq);

	p->sched_class->enqueue_task(rq, p, flags);
	p->on_rq = TASK_ON_RQ_QUEUED;
}
/* mm/page_alloc.c */
/*
 * Allocate 2^@order contiguous pages: attempt the freelist fast path
 * first and fall back to the reclaim/compaction slow path only when
 * that fails.  Returns NULL when the allocation context cannot be set
 * up or the slow path also fails.
 */
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
			   nodemask_t *nodemask)
{
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	struct alloc_context ac = {};
	gfp_t alloc_gfp;
	struct page *page;

	/* Restrict the request to the globally permitted GFP bits. */
	gfp &= gfp_allowed_mask;
	alloc_gfp = gfp;

	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
				 &alloc_gfp, &alloc_flags))
		return NULL;

	/* Fast path; on failure fall through to the slow path. */
	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
	if (!page)
		page = __alloc_pages_slowpath(alloc_gfp, order, &ac);

	return page;
}
/* kernel/fork.c */
/*
 * copy_process - duplicate the current task and set up its credentials,
 * scheduler state and file table (excerpt of the fork slow path).
 *
 * Returns the new task_struct on success or an ERR_PTR-encoded errno on
 * failure.
 *
 * BUG FIXED: the original jumped to labels (fork_out, bad_fork_free,
 * bad_fork_cleanup) that were never defined, had no success return, and
 * fell off the end of a non-void function.  The labels below unwind
 * exactly what was acquired before each failure point.  Unused locals
 * (pidfd, delayed) were dropped.
 */
static struct task_struct *
copy_process(struct pid *pid, int trace, int node,
	     struct kernel_clone_args *args)
{
	int retval;
	struct task_struct *p;

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;

	retval = copy_creds(p, args->clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	retval = sched_fork(args->clone_flags, p);
	if (retval)
		goto bad_fork_cleanup;

	retval = copy_files(args->clone_flags, p);
	if (retval)
		goto bad_fork_cleanup;

	return p;

bad_fork_cleanup:
	/* Credentials were taken after the task struct; drop them first. */
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
/* net/socket.c */
/*
 * __sock_create - common socket-creation path: validate the request,
 * allocate a socket and hand it to the protocol family's create hook.
 *
 * Returns 0 and stores the new socket in *@res, or a negative errno.
 *
 * BUG FIXED: the original called pf->create() without checking that
 * rcu_dereference(net_families[family]) returned a registered family —
 * an unregistered family slot is NULL and would be dereferenced.
 */
int __sock_create(struct net *net, int family, int type, int protocol,
		  struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/* Range-check caller-supplied family and type. */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	sock = sock_alloc();
	if (!sock) {
		net_warn_ratelimited("no more sockets\n");
		return -ENFILE;
	}
	sock->type = type;

	/*
	 * The family can be concurrently unregistered, so the slot may be
	 * NULL.  NOTE(review): a complete fix also needs rcu_read_lock()
	 * around the dereference and a module reference held across the
	 * pf->create() call — confirm against the upstream version.
	 */
	pf = rcu_dereference(net_families[family]);
	if (!pf) {
		err = -EAFNOSUPPORT;
		goto out_release;
	}

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_release;

	*res = sock;
	return 0;

out_release:
	sock_release(sock);
	return err;
}
/* kernel/signal.c */
/*
 * Decide whether task @t would ignore signal @sig.  Returns nonzero when
 * the signal is ignored, 0 when it would be delivered.
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler = sig_handler(t, sig);

	/*
	 * Unkillable (init-like) tasks discard default-disposition signals,
	 * unless the sender forces a kernel-only signal through.
	 */
	if (unlikely((t->signal->flags & SIGNAL_UNKILLABLE) &&
		     handler == SIG_DFL &&
		     !(force && sig_kernel_only(sig))))
		return 1;

	/* A custom handler (including SIG_IGN) decides for itself. */
	if (handler != SIG_DFL)
		return 0;

	return sig_kernel_ignore(sig);
}
int force_sig_info(
struct kernel_siginfo *info)
{
unsigned long int flags;
int ret, blocked, ignored;
struct k_sigaction *ka;
spin_lock_irqsave(
¤t->sighand->siglock,
flags);
ka = ¤t->sighand
->action[
info->si_signo - 1];
blocked = sigismember(
¤t->blocked,
info->si_signo);
ret = send_signal_locked(
info->si_signo,
info, current,
PIDTYPE_PID);
spin_unlock_irqrestore(
¤t->sighand->siglock,
flags);
return ret;
}/* kernel/sched/core.c */
/*
 * Initialise the scheduler-entity state of a freshly forked task.
 * clone_flags is accepted for interface parity with sched_fork() but is
 * not consulted in this visible portion.
 */
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	/* Not enqueued on any runqueue yet. */
	p->on_rq = 0;
	p->se.on_rq = 0;

	/* Zero every accumulated runtime statistic for the new entity. */
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.nr_migrations		= 0;
	p->se.vruntime			= 0;

	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* cfs_rq is assigned later, when the entity is attached to a group. */
	p->se.cfs_rq = NULL;
#endif
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}
/*
 * Put task @p on runqueue @rq via its scheduling class, refreshing the
 * rq clock first unless the caller asked us not to (ENQUEUE_NOCLOCK).
 */
static inline void
enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	const int skip_clock = flags & ENQUEUE_NOCLOCK;

	if (!skip_clock)
		update_rq_clock(rq);

	p->sched_class->enqueue_task(rq, p, flags);
	p->on_rq = TASK_ON_RQ_QUEUED;
}
/* mm/page_alloc.c */
/*
 * Allocate 2^@order contiguous pages: attempt the freelist fast path
 * first and fall back to the reclaim/compaction slow path only when
 * that fails.  Returns NULL when the allocation context cannot be set
 * up or the slow path also fails.
 */
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
			   nodemask_t *nodemask)
{
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	struct alloc_context ac = {};
	gfp_t alloc_gfp;
	struct page *page;

	/* Restrict the request to the globally permitted GFP bits. */
	gfp &= gfp_allowed_mask;
	alloc_gfp = gfp;

	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
				 &alloc_gfp, &alloc_flags))
		return NULL;

	/* Fast path; on failure fall through to the slow path. */
	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
	if (!page)
		page = __alloc_pages_slowpath(alloc_gfp, order, &ac);

	return page;
}
/* kernel/fork.c */
/*
 * copy_process - duplicate the current task and set up its credentials,
 * scheduler state and file table (excerpt of the fork slow path).
 *
 * Returns the new task_struct on success or an ERR_PTR-encoded errno on
 * failure.
 *
 * BUG FIXED: the original jumped to labels (fork_out, bad_fork_free,
 * bad_fork_cleanup) that were never defined, had no success return, and
 * fell off the end of a non-void function.  The labels below unwind
 * exactly what was acquired before each failure point.  Unused locals
 * (pidfd, delayed) were dropped.
 */
static struct task_struct *
copy_process(struct pid *pid, int trace, int node,
	     struct kernel_clone_args *args)
{
	int retval;
	struct task_struct *p;

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;

	retval = copy_creds(p, args->clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	retval = sched_fork(args->clone_flags, p);
	if (retval)
		goto bad_fork_cleanup;

	retval = copy_files(args->clone_flags, p);
	if (retval)
		goto bad_fork_cleanup;

	return p;

bad_fork_cleanup:
	/* Credentials were taken after the task struct; drop them first. */
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
/* net/socket.c */
/*
 * __sock_create - common socket-creation path: validate the request,
 * allocate a socket and hand it to the protocol family's create hook.
 *
 * Returns 0 and stores the new socket in *@res, or a negative errno.
 *
 * BUG FIXED: the original called pf->create() without checking that
 * rcu_dereference(net_families[family]) returned a registered family —
 * an unregistered family slot is NULL and would be dereferenced.
 */
int __sock_create(struct net *net, int family, int type, int protocol,
		  struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/* Range-check caller-supplied family and type. */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	sock = sock_alloc();
	if (!sock) {
		net_warn_ratelimited("no more sockets\n");
		return -ENFILE;
	}
	sock->type = type;

	/*
	 * The family can be concurrently unregistered, so the slot may be
	 * NULL.  NOTE(review): a complete fix also needs rcu_read_lock()
	 * around the dereference and a module reference held across the
	 * pf->create() call — confirm against the upstream version.
	 */
	pf = rcu_dereference(net_families[family]);
	if (!pf) {
		err = -EAFNOSUPPORT;
		goto out_release;
	}

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_release;

	*res = sock;
	return 0;

out_release:
	sock_release(sock);
	return err;
}
/* kernel/signal.c */
/*
 * Decide whether task @t would ignore signal @sig.  Returns nonzero when
 * the signal is ignored, 0 when it would be delivered.
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler = sig_handler(t, sig);

	/*
	 * Unkillable (init-like) tasks discard default-disposition signals,
	 * unless the sender forces a kernel-only signal through.
	 */
	if (unlikely((t->signal->flags & SIGNAL_UNKILLABLE) &&
		     handler == SIG_DFL &&
		     !(force && sig_kernel_only(sig))))
		return 1;

	/* A custom handler (including SIG_IGN) decides for itself. */
	if (handler != SIG_DFL)
		return 0;

	return sig_kernel_ignore(sig);
}
int force_sig_info(
struct kernel_siginfo *info)
{
unsigned long int flags;
int ret, blocked, ignored;
struct k_sigaction *ka;
spin_lock_irqsave(
¤t->sighand->siglock,
flags);
ka = ¤t->sighand
->action[
info->si_signo - 1];
blocked = sigismember(
¤t->blocked,
info->si_signo);
ret = send_signal_locked(
info->si_signo,
info, current,
PIDTYPE_PID);
spin_unlock_irqrestore(
¤t->sighand->siglock,
flags);
return ret;
}