[8/8] selftests/proc: Assert clock_gettime(CLOCK_BOOTTIME) VS /proc/uptime monotonicity

Message ID 20230222144649.624380-9-frederic@kernel.org
State New
Headers
Series timers/nohz: Fixes and cleanups v3 |

Commit Message

Frederic Weisbecker Feb. 22, 2023, 2:46 p.m. UTC
  The first field of /proc/uptime relies on the CLOCK_BOOTTIME clock which
can also be fetched from clock_gettime() API.

Improve the test coverage while verifying the monotonicity of
CLOCK_BOOTTIME across both interfaces.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Wei Li <liwei391@huawei.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Mirsad Goran Todorovac <mirsad.todorovac@alu.unizg.hr>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 .../testing/selftests/proc/proc-uptime-001.c  | 21 ++++++++++++++----
 .../testing/selftests/proc/proc-uptime-002.c  | 22 +++++++++++++++----
 tools/testing/selftests/proc/proc-uptime.h    | 12 ++++++++++
 3 files changed, 47 insertions(+), 8 deletions(-)
  

Comments

Mirsad Todorovac March 8, 2023, 3:59 p.m. UTC | #1
Hi Frederic,

On 2/22/23 15:46, Frederic Weisbecker wrote:
> The first field of /proc/uptime relies on the CLOCK_BOOTTIME clock which
> can also be fetched from clock_gettime() API.
> 
> Improve the test coverage while verifying the monotonicity of
> CLOCK_BOOTTIME accross both interfaces.
> 
> Suggested-by: Thomas Gleixner <tglx@linutronix.de>
> Cc: Yu Liao <liaoyu15@huawei.com>
> Cc: Hillf Danton <hdanton@sina.com>
> Cc: Ingo Molnar <mingo@kernel.org>
> Cc: Wei Li <liwei391@huawei.com>
> Cc: Alexey Dobriyan <adobriyan@gmail.com>
> Cc: Mirsad Goran Todorovac <mirsad.todorovac@alu.unizg.hr>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
> ---
>   .../testing/selftests/proc/proc-uptime-001.c  | 21 ++++++++++++++----
>   .../testing/selftests/proc/proc-uptime-002.c  | 22 +++++++++++++++----
>   tools/testing/selftests/proc/proc-uptime.h    | 12 ++++++++++
>   3 files changed, 47 insertions(+), 8 deletions(-)
> 
> diff --git a/tools/testing/selftests/proc/proc-uptime-001.c b/tools/testing/selftests/proc/proc-uptime-001.c
> index 35bddd9dd60b..f335eec5067e 100644
> --- a/tools/testing/selftests/proc/proc-uptime-001.c
> +++ b/tools/testing/selftests/proc/proc-uptime-001.c
> @@ -13,9 +13,9 @@
>    * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
>    * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
>    */
> -// Test that boottime value in /proc/uptime increments monotonically.
> -// We don't test idle time monotonicity due to broken iowait task
> -// counting, cf: comment above get_cpu_idle_time_us()
> +// Test that boottime value in /proc/uptime and CLOCK_BOOTTIME increment
> +// monotonically. We don't test idle time monotonicity due to broken iowait
> +// task counting, cf: comment above get_cpu_idle_time_us()
>   #undef NDEBUG
>   #include <assert.h>
>   #include <stdint.h>
> @@ -27,7 +27,7 @@
>   
>   int main(void)
>   {
> -	uint64_t start, u0, u1;
> +	uint64_t start, u0, u1, c0, c1;
>   	int fd;
>   
>   	fd = open("/proc/uptime", O_RDONLY);
> @@ -35,10 +35,23 @@ int main(void)
>   
>   	u0 = proc_uptime(fd);
>   	start = u0;
> +	c0 = clock_boottime();
> +
>   	do {
>   		u1 = proc_uptime(fd);
> +		c1 = clock_boottime();
> +
> +		/* Is /proc/uptime monotonic ? */
>   		assert(u1 >= u0);
> +
> +		/* Is CLOCK_BOOTTIME monotonic ? */
> +		assert(c1 >= c0);
> +
> +		/* Is CLOCK_BOOTTIME VS /proc/uptime monotonic ? */
> +		assert(c0 >= u0);
> +
>   		u0 = u1;
> +		c0 = c1;
>   	} while (u1 - start < 100);
>   
>   	return 0;
> diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
> index 7ad79d5eaa84..ae453daa96c1 100644
> --- a/tools/testing/selftests/proc/proc-uptime-002.c
> +++ b/tools/testing/selftests/proc/proc-uptime-002.c
> @@ -13,9 +13,10 @@
>    * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
>    * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
>    */
> -// Test that boottime value in /proc/uptime increments monotonically
> -// while shifting across CPUs. We don't test idle time monotonicity
> -// due to broken iowait task counting, cf: comment above get_cpu_idle_time_us()
> +// Test that boottime value in /proc/uptime and CLOCK_BOOTTIME increment
> +// monotonically while shifting across CPUs. We don't test idle time
> +// monotonicity due to broken iowait task counting, cf: comment above
> +// get_cpu_idle_time_us()
>   #undef NDEBUG
>   #include <assert.h>
>   #include <errno.h>
> @@ -43,10 +44,10 @@ static inline int sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned lo
>   
>   int main(void)
>   {
> +	uint64_t u0, u1, c0, c1;
>   	unsigned int len;
>   	unsigned long *m;
>   	unsigned int cpu;
> -	uint64_t u0, u1;
>   	int fd;
>   
>   	/* find out "nr_cpu_ids" */
> @@ -62,6 +63,8 @@ int main(void)
>   	assert(fd >= 0);
>   
>   	u0 = proc_uptime(fd);
> +	c0 = clock_boottime();
> +
>   	for (cpu = 0; cpu < len * 8; cpu++) {
>   		memset(m, 0, len);
>   		m[cpu / (8 * sizeof(unsigned long))] |= 1UL << (cpu % (8 * sizeof(unsigned long)));
> @@ -70,8 +73,19 @@ int main(void)
>   		sys_sched_setaffinity(0, len, m);
>   
>   		u1 = proc_uptime(fd);
> +		c1 = clock_boottime();
> +
> +		/* Is /proc/uptime monotonic ? */
>   		assert(u1 >= u0);
> +
> +		/* Is CLOCK_BOOTTIME monotonic ? */
> +		assert(c1 >= c0);
> +
> +		/* Is CLOCK_BOOTTIME VS /proc/uptime monotonic ? */
> +		assert(c0 >= u0);
> +
>   		u0 = u1;
> +		c0 = c1;
>   	}
>   
>   	return 0;
> diff --git a/tools/testing/selftests/proc/proc-uptime.h b/tools/testing/selftests/proc/proc-uptime.h
> index ca55abeb0ccc..730cce4a3d73 100644
> --- a/tools/testing/selftests/proc/proc-uptime.h
> +++ b/tools/testing/selftests/proc/proc-uptime.h
> @@ -19,9 +19,21 @@
>   #include <string.h>
>   #include <stdlib.h>
>   #include <unistd.h>
> +#include <time.h>
>   
>   #include "proc.h"
>   
> +static uint64_t clock_boottime(void)
> +{
> +	struct timespec ts;
> +	int err;
> +
> +	err = clock_gettime(CLOCK_BOOTTIME, &ts);
> +	assert(err >= 0);
> +
> +	return (ts.tv_sec * 100) + (ts.tv_nsec / 10000000);
> +}
> +
>   static uint64_t proc_uptime(int fd)
>   {
>   	uint64_t val1, val2;

 From what I see, you round the CLOCK_BOOTTIME time to 1/100ths of a second.

A simple program that queries clock_getres() on system clocks gives this
result:

clock_res [CLOCK_REALTIME] = 0.000000001s
clock_res [CLOCK_REALTIME_COARSE] = 0.004000000s
clock_res [CLOCK_MONOTONIC] = 0.000000001s
clock_res [CLOCK_MONOTONIC_COARSE] = 0.004000000s
clock_res [CLOCK_MONOTONIC_RAW] = 0.000000001s
clock_res [CLOCK_BOOTTIME] = 0.000000001s
clock_res [CLOCK_PROCESS_CPUTIME_ID] = 0.000000001s
clock_res [CLOCK_THREAD_CPUTIME_ID] = 0.000000001s

A number of programs may depend, e.g., on CLOCK_REALTIME or CLOCK_BOOTTIME to give
a different result each nanosecond.

I came across this when generating nonces for HMACs according to recommendations
from RFC 4086 "Randomness Requirements for Security".

If the value of CLOCK_BOOTTIME or CLOCK_REALTIME is incremented not at the
resolution that clock_getres() reports, but at best in steps of 1/100th of a second,
that would seriously weaken our security (for as you know, in many cryptographic
uses nonces need not be random, but MUST NOT ever repeat nor go backwards).

Could we modify the test for this assumption, or is the assumption wrong?

Here the test for CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID
increasing monotonically with guaranteed increased value of nanoseconds
would also seem good.

Maybe this is already covered in another test case, but it seems that all
clocks should be guaranteed to be monotonically increasing, and increased
at least by one nanosecond with each syscall, or many algorithms would break.

In other words, CLOCK_BOOTTIME should be tested to increase monotonically in
the resolution given by clock_getres (CLOCK_BOOTTIME, &tv_res), not in 1/100ths
of second (IMHO).

Am I wrong in my assumption?

Thank you,
Mirsad
  
Frederic Weisbecker March 21, 2023, 12:44 p.m. UTC | #2
On Wed, Mar 08, 2023 at 04:59:41PM +0100, Mirsad Todorovac wrote:
> On 2/22/23 15:46, Frederic Weisbecker wrote:
> From what I see, you round the CLOCK_BOOTIME time to 1/100ths of a second.
> 
> A simple program that queries clock_getres() on system clocks gives this
> result:
> 
> clock_res [CLOCK_REALTIME] = 0.000000001s
> clock_res [CLOCK_REALTIME_COARSE] = 0.004000000s
> clock_res [CLOCK_MONOTONIC] = 0.000000001s
> clock_res [CLOCK_MONOTONIC_COARSE] = 0.004000000s
> clock_res [CLOCK_MONOTONIC_RAW] = 0.000000001s
> clock_res [CLOCK_BOOTTIME] = 0.000000001s
> clock_res [CLOCK_PROCESS_CPUTIME_ID] = 0.000000001s
> clock_res [CLOCK_THREAD_CPUTIME_ID] = 0.000000001s
> 
> A number of programs may depend i.e. on CLOCK_REALTIME or CLOCK_BOOTIME to give
> different result each nanosecond.
> 
> I came across this when generating nonces for HMACs according to recommendations
> from RFC 4086 "Randomness Requirements for Security".
> 
> If the value of CLOCK_BOOTTIME or CLOCK_REALTIME is incremented not in what
> clock_getres() gives, but at best in 1/100th of second instead, that would seriously
> weaken our security (for as you know, in many cryptographic uses nonces need not
> be random, but MUST NOT ever repeat nor go backwards).
> 
> Could we modify the test for this assumption, or is the assumption wrong?
> 
> Here the test for CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID
> increasing monotonically with guaranteed increased value of nanoseconds
> would also seem good.
> 
> Maybe this is already covered in another test case, but it seems that all
> clocks should be guaranteed to be monotonically increasing, and increased
> at least by one nanosecond with each syscall, or many algorithms would break.
> 
> In other words, CLOCK_BOOTTIME should be tested to increase monotonically in
> the resolution given by clock_getres (CLOCK_BOOTTIME, &tv_res), not in 1/100ths
> of second (IMHO).

Maybe but verifying a clock against its own resolution is another testcase. Here the
point is to verify that CLOCK_BOOTTIME is monotonic against /proc/uptime, and
since /proc/uptime has a 1/100 second resolution, rounding clock_gettime(CLOCK_BOOTTIME)
result down to that is the best we can do.

Thanks.
  
Mirsad Todorovac March 26, 2023, 8:03 p.m. UTC | #3
On 21. 03. 2023. 13:44, Frederic Weisbecker wrote:
> On Wed, Mar 08, 2023 at 04:59:41PM +0100, Mirsad Todorovac wrote:
>> On 2/22/23 15:46, Frederic Weisbecker wrote:
>> From what I see, you round the CLOCK_BOOTIME time to 1/100ths of a second.
>>
>> A simple program that queries clock_getres() on system clocks gives this
>> result:
>>
>> clock_res [CLOCK_REALTIME] = 0.000000001s
>> clock_res [CLOCK_REALTIME_COARSE] = 0.004000000s
>> clock_res [CLOCK_MONOTONIC] = 0.000000001s
>> clock_res [CLOCK_MONOTONIC_COARSE] = 0.004000000s
>> clock_res [CLOCK_MONOTONIC_RAW] = 0.000000001s
>> clock_res [CLOCK_BOOTTIME] = 0.000000001s
>> clock_res [CLOCK_PROCESS_CPUTIME_ID] = 0.000000001s
>> clock_res [CLOCK_THREAD_CPUTIME_ID] = 0.000000001s
>>
>> A number of programs may depend i.e. on CLOCK_REALTIME or CLOCK_BOOTIME to give
>> different result each nanosecond.
>>
>> I came across this when generating nonces for HMACs according to recommendations
>> from RFC 4086 "Randomness Requirements for Security".
>>
>> If the value of CLOCK_BOOTTIME or CLOCK_REALTIME is incremented not in what
>> clock_getres() gives, but at best in 1/100th of second instead, that would seriously
>> weaken our security (for as you know, in many cryptographic uses nonces need not
>> be random, but MUST NOT ever repeat nor go backwards).
>>
>> Could we modify the test for this assumption, or is the assumption wrong?
>>
>> Here the test for CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID
>> increasing monotonically with guaranteed increased value of nanoseconds
>> would also seem good.
>>
>> Maybe this is already covered in another test case, but it seems that all
>> clocks should be guaranteed to be monotonically increasing, and increased
>> at least by one nanosecond with each syscall, or many algorithms would break.
>>
>> In other words, CLOCK_BOOTTIME should be tested to increase monotonically in
>> the resolution given by clock_getres (CLOCK_BOOTTIME, &tv_res), not in 1/100ths
>> of second (IMHO).
> 
> Maybe but verifying a clock against its own resolution is another testcase. Here the
> point is to verify that CLOCK_BOOTTIME is monotonic against /proc/uptime, and
> since /proc/uptime has an 1/100 second resolution, rounding clock_gettime(CLOCK_BOOTTIME)
> result down to that is the best we can do.
> 
> Thanks.

Hi Frederic,

Thank you for explaining that.

I've read somewhere (forgot the link) that clock_gettime(CLOCK_*) clocks should
be guaranteed to return at least a nanosecond increased value for a PID or TID
from call to call.

Returning the same value would break some algorithms that depend on a monotonic
increase of time - for example, some naive implementations of nonce generators.

I believe this is worth assuring in tests, or possibly some naive crypto
would reveal its pre-shared secrets in consecutive calls. (Please see RFC 4086,
"Randomness Requirements for Security", for a more detailed explanation.)

Best regards,
Mirsad
  

Patch

diff --git a/tools/testing/selftests/proc/proc-uptime-001.c b/tools/testing/selftests/proc/proc-uptime-001.c
index 35bddd9dd60b..f335eec5067e 100644
--- a/tools/testing/selftests/proc/proc-uptime-001.c
+++ b/tools/testing/selftests/proc/proc-uptime-001.c
@@ -13,9 +13,9 @@ 
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-// Test that boottime value in /proc/uptime increments monotonically.
-// We don't test idle time monotonicity due to broken iowait task
-// counting, cf: comment above get_cpu_idle_time_us()
+// Test that boottime value in /proc/uptime and CLOCK_BOOTTIME increment
+// monotonically. We don't test idle time monotonicity due to broken iowait
+// task counting, cf: comment above get_cpu_idle_time_us()
 #undef NDEBUG
 #include <assert.h>
 #include <stdint.h>
@@ -27,7 +27,7 @@ 
 
 int main(void)
 {
-	uint64_t start, u0, u1;
+	uint64_t start, u0, u1, c0, c1;
 	int fd;
 
 	fd = open("/proc/uptime", O_RDONLY);
@@ -35,10 +35,23 @@  int main(void)
 
 	u0 = proc_uptime(fd);
 	start = u0;
+	c0 = clock_boottime();
+
 	do {
 		u1 = proc_uptime(fd);
+		c1 = clock_boottime();
+
+		/* Is /proc/uptime monotonic ? */
 		assert(u1 >= u0);
+
+		/* Is CLOCK_BOOTTIME monotonic ? */
+		assert(c1 >= c0);
+
+		/* Is CLOCK_BOOTTIME VS /proc/uptime monotonic ? */
+		assert(c0 >= u0);
+
 		u0 = u1;
+		c0 = c1;
 	} while (u1 - start < 100);
 
 	return 0;
diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
index 7ad79d5eaa84..ae453daa96c1 100644
--- a/tools/testing/selftests/proc/proc-uptime-002.c
+++ b/tools/testing/selftests/proc/proc-uptime-002.c
@@ -13,9 +13,10 @@ 
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-// Test that boottime value in /proc/uptime increments monotonically
-// while shifting across CPUs. We don't test idle time monotonicity
-// due to broken iowait task counting, cf: comment above get_cpu_idle_time_us()
+// Test that boottime value in /proc/uptime and CLOCK_BOOTTIME increment
+// monotonically while shifting across CPUs. We don't test idle time
+// monotonicity due to broken iowait task counting, cf: comment above
+// get_cpu_idle_time_us()
 #undef NDEBUG
 #include <assert.h>
 #include <errno.h>
@@ -43,10 +44,10 @@  static inline int sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned lo
 
 int main(void)
 {
+	uint64_t u0, u1, c0, c1;
 	unsigned int len;
 	unsigned long *m;
 	unsigned int cpu;
-	uint64_t u0, u1;
 	int fd;
 
 	/* find out "nr_cpu_ids" */
@@ -62,6 +63,8 @@  int main(void)
 	assert(fd >= 0);
 
 	u0 = proc_uptime(fd);
+	c0 = clock_boottime();
+
 	for (cpu = 0; cpu < len * 8; cpu++) {
 		memset(m, 0, len);
 		m[cpu / (8 * sizeof(unsigned long))] |= 1UL << (cpu % (8 * sizeof(unsigned long)));
@@ -70,8 +73,19 @@  int main(void)
 		sys_sched_setaffinity(0, len, m);
 
 		u1 = proc_uptime(fd);
+		c1 = clock_boottime();
+
+		/* Is /proc/uptime monotonic ? */
 		assert(u1 >= u0);
+
+		/* Is CLOCK_BOOTTIME monotonic ? */
+		assert(c1 >= c0);
+
+		/* Is CLOCK_BOOTTIME VS /proc/uptime monotonic ? */
+		assert(c0 >= u0);
+
 		u0 = u1;
+		c0 = c1;
 	}
 
 	return 0;
diff --git a/tools/testing/selftests/proc/proc-uptime.h b/tools/testing/selftests/proc/proc-uptime.h
index ca55abeb0ccc..730cce4a3d73 100644
--- a/tools/testing/selftests/proc/proc-uptime.h
+++ b/tools/testing/selftests/proc/proc-uptime.h
@@ -19,9 +19,21 @@ 
 #include <string.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <time.h>
 
 #include "proc.h"
 
+static uint64_t clock_boottime(void)
+{
+	struct timespec ts;
+	int err;
+
+	err = clock_gettime(CLOCK_BOOTTIME, &ts);
+	assert(err >= 0);
+
+	return (ts.tv_sec * 100) + (ts.tv_nsec / 10000000);
+}
+
 static uint64_t proc_uptime(int fd)
 {
 	uint64_t val1, val2;