Updated from Linux LTS 3.10.21 to 3.10.22

Author: Nathan
Date:   2025-04-07 10:16:56 -05:00
parent  8de512f759
commit  fb417c8ab5

200 changed files with 2126 additions and 770 deletions

kernel/audit.c

@@ -613,7 +613,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 	int rc = 0;
 	uid_t uid = from_kuid(&init_user_ns, current_uid());
 
-	if (!audit_enabled) {
+	if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
 		*ab = NULL;
 		return rc;
 	}
@@ -659,6 +659,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	switch (msg_type) {
 	case AUDIT_GET:
+		status_set.mask = 0;
 		status_set.enabled = audit_enabled;
 		status_set.failure = audit_failure;
 		status_set.pid = audit_pid;
@@ -670,7 +671,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 				 &status_set, sizeof(status_set));
 		break;
 	case AUDIT_SET:
-		if (nlh->nlmsg_len < sizeof(struct audit_status))
+		if (nlmsg_len(nlh) < sizeof(struct audit_status))
			return -EINVAL;
 		status_get = (struct audit_status *)data;
 		if (status_get->mask & AUDIT_STATUS_ENABLED) {
@@ -832,7 +833,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		memset(&s, 0, sizeof(s));
 		/* guard against past and future API changes */
-		memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
+		memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
 		if ((s.enabled != 0 && s.enabled != 1) ||
 		    (s.log_passwd != 0 && s.log_passwd != 1))
			return -EINVAL;
@@ -1536,6 +1537,26 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
 		}
 	}
 
+	/* log the audit_names record type */
+	audit_log_format(ab, " nametype=");
+	switch(n->type) {
+	case AUDIT_TYPE_NORMAL:
+		audit_log_format(ab, "NORMAL");
+		break;
+	case AUDIT_TYPE_PARENT:
+		audit_log_format(ab, "PARENT");
+		break;
+	case AUDIT_TYPE_CHILD_DELETE:
+		audit_log_format(ab, "DELETE");
+		break;
+	case AUDIT_TYPE_CHILD_CREATE:
+		audit_log_format(ab, "CREATE");
+		break;
+	default:
+		audit_log_format(ab, "UNKNOWN");
+		break;
+	}
+
 	audit_log_fcaps(ab, n);
 	audit_log_end(ab);
 }
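
Note: both audit.c length fixes hinge on netlink accounting: nlh->nlmsg_len is
the total message length including the header, while the kernel helper
nlmsg_len(nlh) yields only the payload (nlh->nlmsg_len minus NLMSG_HDRLEN), so
bounding a copy by the raw field can read past the received data; the added
status_set.mask initialization likewise keeps uninitialized stack bytes out of
AUDIT_GET replies. A minimal userspace sketch of the length arithmetic, with
struct audit_status_like as a hypothetical stand-in:

    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <stdio.h>

    /* hypothetical stand-in for struct audit_status */
    struct audit_status_like { unsigned int mask, enabled, failure; };

    int main(void)
    {
        struct {
            struct nlmsghdr nlh;
            struct audit_status_like s;
        } msg = { 0 };

        /* total length as a sender fills it in: header plus payload */
        msg.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(msg.s));

        /* what the kernel's nlmsg_len(nlh) computes: payload only */
        size_t payload = msg.nlh.nlmsg_len - NLMSG_HDRLEN;

        /* a copy bounded by the raw nlmsg_len field instead would overrun
         * the received payload by NLMSG_HDRLEN bytes */
        printf("total=%u payload=%zu\n", msg.nlh.nlmsg_len, payload);
        return 0;
    }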

kernel/audit.h

@@ -85,6 +85,7 @@ struct audit_names {
 	struct filename	*name;
 	int		name_len;	/* number of chars to log */
+	bool		hidden;		/* don't log this record */
 	bool		name_put;	/* call __putname()? */
 	unsigned long	ino;

kernel/auditsc.c

@@ -1399,8 +1399,11 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 	}
 
 	i = 0;
-	list_for_each_entry(n, &context->names_list, list)
+	list_for_each_entry(n, &context->names_list, list) {
+		if (n->hidden)
+			continue;
 		audit_log_name(context, n, NULL, i++, &call_panic);
+	}
 
 	/* Send end of event record to help user space know we are finished */
 	ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -1769,14 +1772,15 @@ void audit_putname(struct filename *name)
 /**
  * __audit_inode - store the inode and device from a lookup
  * @name: name being audited
  * @dentry: dentry being audited
- * @parent: does this dentry represent the parent?
+ * @flags: attributes for this particular entry
  */
 void __audit_inode(struct filename *name, const struct dentry *dentry,
-		   unsigned int parent)
+		   unsigned int flags)
 {
 	struct audit_context *context = current->audit_context;
 	const struct inode *inode = dentry->d_inode;
 	struct audit_names *n;
+	bool parent = flags & AUDIT_INODE_PARENT;
 
 	if (!context->in_syscall)
 		return;
@@ -1831,6 +1835,8 @@ out:
 	if (parent) {
 		n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
 		n->type = AUDIT_TYPE_PARENT;
+		if (flags & AUDIT_INODE_HIDDEN)
+			n->hidden = true;
 	} else {
 		n->name_len = AUDIT_NAME_FULL;
 		n->type = AUDIT_TYPE_NORMAL;
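
Note: the __audit_inode() change above swaps a bool parameter for a flags word
so one call can also mark a parent record hidden. The flag values below match
include/linux/audit.h in this series; the decoder and its callers are a
hypothetical userspace sketch of the convention:

    #include <stdbool.h>
    #include <stdio.h>

    #define AUDIT_INODE_PARENT 1    /* dentry represents the parent */
    #define AUDIT_INODE_HIDDEN 2    /* audit record should be hidden */

    /* mirrors how __audit_inode() now decodes its flags argument */
    static void log_inode(const char *name, unsigned int flags)
    {
        bool parent = flags & AUDIT_INODE_PARENT;
        bool hidden = flags & AUDIT_INODE_HIDDEN;

        printf("%s: parent=%d hidden=%d\n", name, parent, hidden);
    }

    int main(void)
    {
        /* the mq_open fix wants both: a parent record that is not logged */
        log_inode("mqueue root", AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN);
        log_inode("regular lookup", 0);
        return 0;
    }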

kernel/cgroup.c

@@ -91,6 +91,14 @@ static DEFINE_MUTEX(cgroup_mutex);
 static DEFINE_MUTEX(cgroup_root_mutex);
 
+/*
+ * cgroup destruction makes heavy use of work items and there can be a lot
+ * of concurrent destructions.  Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
+ */
+static struct workqueue_struct *cgroup_destroy_wq;
+
 /*
  * Generate an array of cgroup subsystem pointers. At boot time, this is
  * populated with the built in subsystems, and modular subsystems are
@@ -873,7 +881,7 @@ static void cgroup_free_rcu(struct rcu_head *head)
 {
 	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
 
-	schedule_work(&cgrp->free_work);
+	queue_work(cgroup_destroy_wq, &cgrp->free_work);
 }
 
 static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@ -4686,6 +4694,22 @@ out:
 	return err;
 }
 
+static int __init cgroup_wq_init(void)
+{
+	/*
+	 * There isn't much point in executing destruction path in
+	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
+	 * Use 1 for @max_active.
+	 *
+	 * We would prefer to do this in cgroup_init() above, but that
+	 * is called before init_workqueues(): so leave this until after.
+	 */
+	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+	BUG_ON(!cgroup_destroy_wq);
+	return 0;
+}
+core_initcall(cgroup_wq_init);
+
 /*
  * proc_cgroup_show()
  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
@@ -4996,7 +5020,7 @@ void __css_put(struct cgroup_subsys_state *css)
 	v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
 	if (v == 0)
-		schedule_work(&css->dput_work);
+		queue_work(cgroup_destroy_wq, &css->dput_work);
 }
 EXPORT_SYMBOL_GPL(__css_put);
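
Note: the point of the private workqueue is isolation: a burst of cgroup
destruction items could previously occupy all of system_wq's max_active slots
and deadlock against work the destruction path itself waits on. A
kernel-module-style sketch of the same pattern, with hypothetical names:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_destroy_wq;
    static struct work_struct demo_work;

    static void demo_work_fn(struct work_struct *work)
    {
        pr_info("destruction step ran\n");
    }

    static int __init demo_init(void)
    {
        /* max_active = 1: destruction is mostly serialized on a mutex
         * anyway, and a private queue cannot starve system_wq */
        demo_destroy_wq = alloc_workqueue("demo_destroy", 0, 1);
        if (!demo_destroy_wq)
            return -ENOMEM;

        INIT_WORK(&demo_work, demo_work_fn);
        queue_work(demo_destroy_wq, &demo_work);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        destroy_workqueue(demo_destroy_wq);  /* drains pending work */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");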

kernel/cpuset.c

@@ -984,8 +984,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 	need_loop = task_has_mempolicy(tsk) ||
 			!nodes_intersects(*newmems, tsk->mems_allowed);
 
-	if (need_loop)
+	if (need_loop) {
+		local_irq_disable();
 		write_seqcount_begin(&tsk->mems_allowed_seq);
+	}
 
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -993,8 +995,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
 	tsk->mems_allowed = *newmems;
 
-	if (need_loop)
+	if (need_loop) {
 		write_seqcount_end(&tsk->mems_allowed_seq);
+		local_irq_enable();
+	}
 
 	task_unlock(tsk);
 }
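
Note: write_seqcount_begin() leaves the counter odd until the matching
write_seqcount_end(), so a reader that starts polling on the same CPU while
the writer is interrupted mid-update can spin forever; the allocator can read
mems_allowed_seq from contexts that must not wait on this writer, hence the
interrupt-off window. A sketch of the pattern with hypothetical data (run
seqcount_init(&demo_seq) once before use):

    #include <linux/irqflags.h>
    #include <linux/seqlock.h>

    static seqcount_t demo_seq;
    static unsigned long demo_value;

    /* writer: keep irqs off so an irq-context reader on this CPU cannot
     * begin spinning on an odd sequence count we never get to finish */
    static void demo_update(unsigned long val)
    {
        local_irq_disable();
        write_seqcount_begin(&demo_seq);
        demo_value = val;
        write_seqcount_end(&demo_seq);
        local_irq_enable();
    }

    /* reader: retries until it observes a stable, even sequence count */
    static unsigned long demo_read(void)
    {
        unsigned long v;
        unsigned seq;

        do {
            seq = read_seqcount_begin(&demo_seq);
            v = demo_value;
        } while (read_seqcount_retry(&demo_seq, seq));

        return v;
    }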

kernel/power/snapshot.c

@@ -1398,7 +1398,11 @@ int hibernate_preallocate_memory(void)
 	 * highmem and non-highmem zones separately.
 	 */
 	pages_highmem = preallocate_image_highmem(highmem / 2);
-	alloc = (count - max_size) - pages_highmem;
+	alloc = count - max_size;
+	if (alloc > pages_highmem)
+		alloc -= pages_highmem;
+	else
+		alloc = 0;
 	pages = preallocate_image_memory(alloc, avail_normal);
 	if (pages < alloc) {
 		/* We have exhausted non-highmem pages, try highmem. */
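
Note: count, max_size and pages_highmem are all unsigned, so when
pages_highmem exceeds count - max_size the old one-liner wraps around to a
huge value instead of going negative, and preallocation then chases an
impossible target. A minimal userspace sketch with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long count = 1000, max_size = 900, pages_highmem = 150;

        /* old expression: wraps to nearly 2^64 on a 64-bit machine */
        unsigned long broken = (count - max_size) - pages_highmem;

        /* fixed sequence: clamps at zero instead of underflowing */
        unsigned long alloc = count - max_size;
        if (alloc > pages_highmem)
            alloc -= pages_highmem;
        else
            alloc = 0;

        printf("broken=%lu clamped=%lu\n", broken, alloc);
        return 0;
    }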

kernel/time/alarmtimer.c

@@ -445,7 +445,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 	clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
 
 	if (!alarmtimer_get_rtcdev())
-		return -ENOTSUPP;
+		return -EINVAL;
 
 	return hrtimer_get_res(baseid, tp);
 }
@@ -462,7 +462,7 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
 	struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
 
 	if (!alarmtimer_get_rtcdev())
-		return -ENOTSUPP;
+		return -EINVAL;
 
 	*tp = ktime_to_timespec(base->gettime());
 	return 0;
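
Note: ENOTSUPP (524) is a kernel-internal value with no userspace definition,
so callers of clock_getres()/clock_gettime() on the alarm clocks got an errno
that strerror() cannot even name; EINVAL is the errno POSIX documents for an
unsupported clock. A userspace probe sketch (the fallback define matches
include/uapi/linux/time.h):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #ifndef CLOCK_REALTIME_ALARM
    #define CLOCK_REALTIME_ALARM 8   /* from include/uapi/linux/time.h */
    #endif

    int main(void)
    {
        struct timespec res;

        /* on a box without an RTC this now fails with EINVAL,
         * which strerror() can report meaningfully */
        if (clock_getres(CLOCK_REALTIME_ALARM, &res) == -1)
            printf("clock_getres: %s (errno=%d)\n",
                   strerror(errno), errno);
        else
            printf("resolution: %ld ns\n", res.tv_nsec);
        return 0;
    }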

kernel/trace/ftrace.c

@@ -367,9 +367,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
@@ -417,9 +414,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
 
-	if (ftrace_disabled)
-		return -ENODEV;
-
 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 		return -EBUSY;
@@ -2048,10 +2042,15 @@ static void ftrace_startup_enable(int command)
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	bool hash_enable = true;
+	int ret;
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	ret = __register_ftrace_function(ops);
+	if (ret)
+		return ret;
+
 	ftrace_start_up++;
 	command |= FTRACE_UPDATE_CALLS;
@@ -2073,12 +2072,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 	return 0;
 }
 
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
 	bool hash_disable = true;
+	int ret;
 
 	if (unlikely(ftrace_disabled))
-		return;
+		return -ENODEV;
+
+	ret = __unregister_ftrace_function(ops);
+	if (ret)
+		return ret;
 
 	ftrace_start_up--;
 	/*
@@ -2113,9 +2117,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
 	}
 
 	if (!command || !ftrace_enabled)
-		return;
+		return 0;
 
 	ftrace_run_update_code(command);
+	return 0;
 }
 
 static void ftrace_startup_sysctl(void)
@@ -3020,16 +3025,13 @@ static void __enable_ftrace_function_probe(void)
 	if (i == FTRACE_FUNC_HASHSIZE)
 		return;
 
-	ret = __register_ftrace_function(&trace_probe_ops);
-	if (!ret)
-		ret = ftrace_startup(&trace_probe_ops, 0);
-
+	ret = ftrace_startup(&trace_probe_ops, 0);
 	ftrace_probe_registered = 1;
 }
 
 static void __disable_ftrace_function_probe(void)
 {
-	int ret;
 	int i;
 
 	if (!ftrace_probe_registered)
@@ -3042,9 +3044,7 @@ static void __disable_ftrace_function_probe(void)
 	}
 
 	/* no more funcs left */
-	ret = __unregister_ftrace_function(&trace_probe_ops);
-	if (!ret)
-		ftrace_shutdown(&trace_probe_ops, 0);
-
+	ftrace_shutdown(&trace_probe_ops, 0);
 	ftrace_probe_registered = 0;
 }
@@ -4241,12 +4241,15 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)			\
-	({						\
-		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
-		0;					\
+# define ftrace_startup(ops, command)				\
+	({							\
+		int ___ret = __register_ftrace_function(ops);	\
+		if (!___ret)					\
+			(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
+		___ret;						\
 	})
-# define ftrace_shutdown(ops, command)	do { } while (0)
+# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
@@ -4646,9 +4649,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 	mutex_lock(&ftrace_lock);
 
-	ret = __register_ftrace_function(ops);
-	if (!ret)
-		ret = ftrace_startup(ops, 0);
-
+	ret = ftrace_startup(ops, 0);
 
 	mutex_unlock(&ftrace_lock);
@@ -4667,9 +4668,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 	int ret;
 
 	mutex_lock(&ftrace_lock);
-	ret = __unregister_ftrace_function(ops);
-	if (!ret)
-		ftrace_shutdown(ops, 0);
-
+	ret = ftrace_shutdown(ops, 0);
 	mutex_unlock(&ftrace_lock);
 
 	return ret;
@@ -4863,6 +4862,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
 	return NOTIFY_DONE;
 }
 
+/* Just a place holder for function graph */
+static struct ftrace_ops fgraph_ops __read_mostly = {
+	.func		= ftrace_stub,
+	.flags		= FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
+			  FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -4889,7 +4895,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_return = retfunc;
 	ftrace_graph_entry = entryfunc;
 
-	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
@@ -4906,7 +4912,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_active--;
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
-	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
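
Note: with registration folded into ftrace_startup()/ftrace_shutdown(), the
!CONFIG_DYNAMIC_FTRACE macro variant above must still register and report
failure, which it does via GCC's statement-expression extension: a ({ ... })
block evaluates to its last expression. A minimal userspace sketch of that
idiom, with all names hypothetical:

    #include <stdio.h>

    #define FL_ENABLED 0x1

    struct ops { unsigned int flags; };

    static int register_ops(struct ops *o)
    {
        return o ? 0 : -1;   /* stand-in for real registration */
    }

    /* does the work and yields an error code, like ftrace_startup() */
    #define startup(o)                                  \
        ({                                              \
            int ___ret = register_ops(o);               \
            if (!___ret)                                \
                (o)->flags |= FL_ENABLED;               \
            ___ret;                                     \
        })

    int main(void)
    {
        struct ops o = { 0 };

        if (startup(&o) == 0)
            printf("enabled, flags=0x%x\n", o.flags);
        return 0;
    }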

kernel/workqueue.c

@@ -295,6 +295,9 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 /* I: attributes used when instantiating standard unbound pools on demand */
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
+/* I: attributes used when instantiating ordered pools on demand */
+static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -4059,7 +4062,7 @@ out_unlock:
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
 	bool highpri = wq->flags & WQ_HIGHPRI;
-	int cpu;
+	int cpu, ret;
 
 	if (!(wq->flags & WQ_UNBOUND)) {
 		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
@@ -4079,6 +4082,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
+	} else if (wq->flags & __WQ_ORDERED) {
+		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+		/* there should only be single pwq for ordering guarantee */
+		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+		     "ordering guarantee broken for workqueue %s\n", wq->name);
+		return ret;
 	} else {
 		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
@@ -4990,13 +5000,23 @@ static int __init init_workqueues(void)
 		}
 	}
 
-	/* create default unbound wq attrs */
+	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
 
 		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
 		attrs->nice = std_nice[i];
 		unbound_std_wq_attrs[i] = attrs;
+
+		/*
+		 * An ordered wq should have only one pwq as ordering is
+		 * guaranteed by max_active which is enforced by pwqs.
+		 * Turn off NUMA so that dfl_pwq is used for all nodes.
+		 */
+		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+		attrs->nice = std_nice[i];
+		attrs->no_numa = true;
+		ordered_wq_attrs[i] = attrs;
 	}
 
 	system_wq = alloc_workqueue("events", 0, 0);
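
Note: the single-pwq WARN in alloc_and_link_pwqs() is what defends the
ordered-workqueue contract on NUMA machines: one pool_workqueue means one
max_active=1 queue, so items execute strictly in queueing order. A
kernel-module-style sketch of code relying on that guarantee, with
hypothetical names:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_ordered_wq;
    static struct work_struct first_work, second_work;

    static void first_fn(struct work_struct *w)  { pr_info("runs first\n"); }
    static void second_fn(struct work_struct *w) { pr_info("runs second\n"); }

    static int __init demo_init(void)
    {
        /* alloc_ordered_workqueue() sets __WQ_ORDERED and max_active = 1,
         * exactly the configuration the new WARN() polices */
        demo_ordered_wq = alloc_ordered_workqueue("demo_ordered", 0);
        if (!demo_ordered_wq)
            return -ENOMEM;

        INIT_WORK(&first_work, first_fn);
        INIT_WORK(&second_work, second_fn);
        queue_work(demo_ordered_wq, &first_work);
        queue_work(demo_ordered_wq, &second_work);  /* never overtakes */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        destroy_workqueue(demo_ordered_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");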