author	Lennart Poettering <lennart@poettering.net>	2017-09-07 16:31:01 +0200
committer	Sven Eden <yamakuzure@gmx.net>	2017-11-21 07:14:23 +0100
commit	ee0cefd5da6939c406f0d421d6115fac4dd78535 (patch)
tree	be0a55a7324a01996f3ecd236dc081cd1251821d /src/core/cgroup.c
parent	e5ff123fa4c23ed39685f8f8dc7f77f1627a81fd (diff)
cgroup: refuse to return accounting data if accounting isn't turned on
We used to be a bit sloppy on this, and handed out accounting data even for units where accounting wasn't explicitly enabled. Let's be stricter here, so that we know the accounting data is actually fully valid. This is necessary, as the accounting data is no longer stored exclusively in cgroupfs, but is partly maintained outside of it, and flushed during unit starts. We should hence only expose accounting data we really know is fully current.
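In practice this means all four getters (memory, tasks, CPU, IP) now return -ENODATA both when the cgroup is gone and when the relevant *Accounting= switch is off. Below is a minimal sketch of a caller honoring that contract; the reporting function is hypothetical, while unit_get_memory_current() and the -ENODATA convention are taken from the diff that follows:

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct Unit Unit;                        /* real type, defined in src/core/unit.h */
    int unit_get_memory_current(Unit *u, uint64_t *ret); /* from the diff below */

    /* Hypothetical status printer, not part of this commit. */
    static void report_memory_usage(Unit *u) {
            uint64_t memory;
            int r;

            r = unit_get_memory_current(u, &memory);
            if (r == -ENODATA) {
                    /* Accounting is disabled for this unit, or the cgroup is
                     * already gone: show a placeholder, not a bogus zero. */
                    puts("Memory: n/a");
                    return;
            }
            if (r < 0) {
                    fprintf(stderr, "Failed to read memory usage: %s\n", strerror(-r));
                    return;
            }

            printf("Memory: %" PRIu64 " bytes\n", memory);
    }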
Diffstat (limited to 'src/core/cgroup.c')
-rw-r--r--	src/core/cgroup.c	35	
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/src/core/cgroup.c b/src/core/cgroup.c
index 0ca80d975..c1e51a7f6 100644
--- a/src/core/cgroup.c
+++ b/src/core/cgroup.c
@@ -2145,11 +2145,18 @@ int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
#if 0 /// UNNEEDED by elogind
int unit_get_memory_current(Unit *u, uint64_t *ret) {
_cleanup_free_ char *v = NULL;
+ CGroupContext *cc;
int r;
assert(u);
assert(ret);
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->memory_accounting)
+ return -ENODATA;
+
if (!u->cgroup_path)
return -ENODATA;
@@ -2173,11 +2180,18 @@ int unit_get_memory_current(Unit *u, uint64_t *ret) {
int unit_get_tasks_current(Unit *u, uint64_t *ret) {
_cleanup_free_ char *v = NULL;
+ CGroupContext *cc;
int r;
assert(u);
assert(ret);
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->tasks_accounting)
+ return -ENODATA;
+
if (!u->cgroup_path)
return -ENODATA;
@@ -2244,6 +2258,7 @@ static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
}
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
+ CGroupContext *cc;
nsec_t ns;
int r;
@@ -2253,6 +2268,12 @@ int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
* started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
* call this function with a NULL return value. */
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->cpu_accounting)
+ return -ENODATA;
+
r = unit_get_cpu_usage_raw(u, &ns);
if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
/* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
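Aside: the comment at the top of unit_get_cpu_usage() describes a caching contract worth noting. A hypothetical call site (the teardown context is made up; the NULL-argument convention comes from that comment):

    /* On the unit-teardown path, before the cgroup is removed: per the
     * comment in unit_get_cpu_usage(), a NULL return pointer only caches
     * the current counter in u->cpu_usage_last, so later queries keep
     * working after the cgroup is gone. Failure here is non-fatal. */
    (void) unit_get_cpu_usage(u, NULL);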
@@ -2282,6 +2303,7 @@ int unit_get_ip_accounting(
CGroupIPAccountingMetric metric,
uint64_t *ret) {
+ CGroupContext *cc;
uint64_t value;
int fd, r;
@@ -2290,6 +2312,19 @@ int unit_get_ip_accounting(
assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
assert(ret);
+ /* IP accounting is currently not recursive, and hence we refuse to return any data for slice nodes. Slices are
+ * inner cgroup nodes and hence have no processes directly attached, hence their counters would be zero
+ * anyway. And if we block this now we can later open this up, if the kernel learns recursive BPF cgroup
+ * filters. */
+ if (u->type == UNIT_SLICE)
+ return -ENODATA;
+
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->ip_accounting)
+ return -ENODATA;
+
fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
u->ip_accounting_ingress_map_fd :
u->ip_accounting_egress_map_fd;
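One stylistic note on the change as a whole: the context-lookup guard is now repeated verbatim in all four getters, differing only in which accounting flag it tests. A hypothetical follow-up (not part of this commit) could factor the shared part out; unit_get_cgroup_context() and the types are real, the helper name is invented:

    #include <assert.h>
    #include <errno.h>

    typedef struct Unit Unit;                        /* real types from src/core/ */
    typedef struct CGroupContext CGroupContext;
    CGroupContext *unit_get_cgroup_context(Unit *u); /* real function in cgroup.c */

    /* Hypothetical helper: resolve the unit's cgroup context, or -ENODATA. */
    static int unit_get_cgroup_context_checked(Unit *u, CGroupContext **ret) {
            CGroupContext *cc;

            assert(u);
            assert(ret);

            cc = unit_get_cgroup_context(u);
            if (!cc)
                    return -ENODATA;

            *ret = cc;
            return 0;
    }

Each getter would then shrink to "r = unit_get_cgroup_context_checked(u, &cc); if (r < 0) return r; if (!cc->memory_accounting) return -ENODATA;", leaving the per-metric flag check as the only duplicated line.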