[RFC,2/4] tools/nolibc: add integer types and integer limit macros

Message ID 20230219185133.14576-3-w@1wt.eu
State New
Series tools/nolibc: add stdint and more integer types

Commit Message

Willy Tarreau Feb. 19, 2023, 6:51 p.m. UTC
  From: Vincent Dagonneau <v@vda.io>

This commit adds some of the missing integer types to stdint.h and adds
limit macros (e.g. INTN_{MIN,MAX}).

The reference used for adding these types is
https://pubs.opengroup.org/onlinepubs/009695399/basedefs/stdint.h.html.

Note that the maximum value of size_t is implementation-defined (the
standard only requires it to be at least 65535); in this case I chose to
stick with what the kernel uses in
linux/include/uapi/asm-generic/posix_types.h: unsigned int on 32-bit and
unsigned long on 64-bit.
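
For reference, roughly what that kernel header does for size_t (paraphrased,
not copied verbatim):

  #if __BITS_PER_LONG != 64
  typedef unsigned int     __kernel_size_t;
  #else
  typedef __kernel_ulong_t __kernel_size_t;
  #endif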

Signed-off-by: Vincent Dagonneau <v@vda.io>
[wt: size_t is always ulong since it matches the word size on all archs
 we care for; calculate the SIZE_MAX, INTPTR_MIN and INTPTR_MAX based on
 the compiler-provided __LONG_MAX__]
Signed-off-by: Willy Tarreau <w@1wt.eu>
---
 tools/include/nolibc/stdint.h | 51 +++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
  

Comments

Thomas Weißschuh Feb. 19, 2023, 7:16 p.m. UTC | #1
On Sun, Feb 19, 2023 at 07:51:31PM +0100, Willy Tarreau wrote:
> From: Vincent Dagonneau <v@vda.io>
> 
> This commit adds some of the missing integer types to stdint.h and adds
> limit macros (e.g. INTN_{MIN,MAX}).
> 
> The reference used for adding these types is
> https://pubs.opengroup.org/onlinepubs/009695399/basedefs/stdint.h.html.
> 
> Note that the maximum value of size_t is implementation-defined (the
> standard only requires it to be at least 65535); in this case I chose to
> stick with what the kernel uses in
> linux/include/uapi/asm-generic/posix_types.h: unsigned int on 32-bit and
> unsigned long on 64-bit.
> 
> Signed-off-by: Vincent Dagonneau <v@vda.io>
> [wt: size_t is always ulong since it matches the word size on all archs
>  we care for; calculate the SIZE_MAX, INTPTR_MIN and INTPTR_MAX based on
>  the compiler-provided __LONG_MAX__]
> Signed-off-by: Willy Tarreau <w@1wt.eu>
> ---
>  tools/include/nolibc/stdint.h | 51 +++++++++++++++++++++++++++++++++++
>  1 file changed, 51 insertions(+)
> 
> diff --git a/tools/include/nolibc/stdint.h b/tools/include/nolibc/stdint.h
> index 4ba264031df9..5f1adf9316ca 100644
> --- a/tools/include/nolibc/stdint.h
> +++ b/tools/include/nolibc/stdint.h
> @@ -21,4 +21,55 @@ typedef unsigned long     uintptr_t;
>  typedef   signed long      intptr_t;
>  typedef   signed long     ptrdiff_t;
>  
> +typedef   int8_t       int_least8_t;
> +typedef  uint8_t      uint_least8_t;
> +typedef  int16_t      int_least16_t;
> +typedef uint16_t     uint_least16_t;
> +typedef  int32_t      int_least32_t;
> +typedef uint32_t     uint_least32_t;
> +typedef  int64_t      int_least64_t;
> +typedef uint64_t     uint_least64_t;
> +
> +typedef  int64_t           intmax_t;
> +typedef uint64_t          uintmax_t;
> +
> +/* limits of integral types */
> +
> +#define        INT8_MIN  (-128)
> +#define       INT16_MIN  (-32767-1)
> +#define       INT32_MIN  (-2147483647-1)
> +#define       INT64_MIN  (-9223372036854775807LL-1)
> +
> +#define        INT8_MAX  (127)
> +#define       INT16_MAX  (32767)
> +#define       INT32_MAX  (2147483647)
> +#define       INT64_MAX  (9223372036854775807LL)
> +
> +#define       UINT8_MAX  (255)
> +#define      UINT16_MAX  (65535)
> +#define      UINT32_MAX  (4294967295U)
> +#define      UINT64_MAX  (18446744073709551615ULL)
> +
> +#define  INT_LEAST8_MIN  INT8_MIN
> +#define INT_LEAST16_MIN  INT16_MIN
> +#define INT_LEAST32_MIN  INT32_MIN
> +#define INT_LEAST64_MIN  INT64_MIN
> +
> +#define  INT_LEAST8_MAX  INT8_MAX
> +#define INT_LEAST16_MAX  INT16_MAX
> +#define INT_LEAST32_MAX  INT32_MAX
> +#define INT_LEAST64_MAX  INT64_MAX
> +
> +#define  UINT_LEAST8_MAX UINT8_MAX
> +#define UINT_LEAST16_MAX UINT16_MAX
> +#define UINT_LEAST32_MAX UINT32_MAX
> +#define UINT_LEAST64_MAX UINT64_MAX
> +
> +#define SIZE_MAX         ((__SIZE_TYPE__)(__LONG_MAX__) * 2 + 1)

Nit: size_t would look nicer than __SIZE_TYPE__.
Also SSIZE_MAX could be added.
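
For illustration, a minimal sketch of what those two suggestions might look
like, assuming ssize_t is typedef'd as signed long in this header (a sketch,
not the actual patch):

  #define SIZE_MAX   (((size_t)__LONG_MAX__) * 2 + 1)
  #define SSIZE_MAX  __LONG_MAX__   /* assuming ssize_t is signed long */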

> +#define INTPTR_MIN       (-__LONG_MAX__ - 1)
> +#define INTPTR_MAX       __LONG_MAX__
> +#define UINTPTR_MAX      (SIZE_MAX)

Parentheses seem unnecessary.

> +#define PTRDIFF_MIN      INTPTR_MIN
> +#define PTRDIFF_MAX      INTPTR_MAX
> +
>  #endif /* _NOLIBC_STDINT_H */
> -- 
> 2.35.3
>
  
Willy Tarreau Feb. 19, 2023, 7:23 p.m. UTC | #2
On Sun, Feb 19, 2023 at 07:16:57PM +0000, Thomas Weißschuh wrote:
> On Sun, Feb 19, 2023 at 07:51:31PM +0100, Willy Tarreau wrote:
> > From: Vincent Dagonneau <v@vda.io>
> > 
> > This commit adds some of the missing integer types to stdint.h and adds
> > limit macros (e.g. INTN_{MIN,MAX}).
> > 
> > The reference used for adding these types is
> > https://pubs.opengroup.org/onlinepubs/009695399/basedefs/stdint.h.html.
> > 
> > Note that the maximum value of size_t is implementation-defined (the
> > standard only requires it to be at least 65535); in this case I chose to
> > stick with what the kernel uses in
> > linux/include/uapi/asm-generic/posix_types.h: unsigned int on 32-bit and
> > unsigned long on 64-bit.
> > 
> > Signed-off-by: Vincent Dagonneau <v@vda.io>
> > [wt: size_t is always ulong since it matches the word size on all archs
> >  we care for; calculate the SIZE_MAX, INTPTR_MIN and INTPTR_MAX based on
> >  the compiler-provided __LONG_MAX__]
> > Signed-off-by: Willy Tarreau <w@1wt.eu>
> > ---
> >  tools/include/nolibc/stdint.h | 51 +++++++++++++++++++++++++++++++++++
> >  1 file changed, 51 insertions(+)
> > 
> > diff --git a/tools/include/nolibc/stdint.h b/tools/include/nolibc/stdint.h
> > index 4ba264031df9..5f1adf9316ca 100644
> > --- a/tools/include/nolibc/stdint.h
> > +++ b/tools/include/nolibc/stdint.h
> > @@ -21,4 +21,55 @@ typedef unsigned long     uintptr_t;
> >  typedef   signed long      intptr_t;
> >  typedef   signed long     ptrdiff_t;
> >  
> > +typedef   int8_t       int_least8_t;
> > +typedef  uint8_t      uint_least8_t;
> > +typedef  int16_t      int_least16_t;
> > +typedef uint16_t     uint_least16_t;
> > +typedef  int32_t      int_least32_t;
> > +typedef uint32_t     uint_least32_t;
> > +typedef  int64_t      int_least64_t;
> > +typedef uint64_t     uint_least64_t;
> > +
> > +typedef  int64_t           intmax_t;
> > +typedef uint64_t          uintmax_t;
> > +
> > +/* limits of integral types */
> > +
> > +#define        INT8_MIN  (-128)
> > +#define       INT16_MIN  (-32767-1)
> > +#define       INT32_MIN  (-2147483647-1)
> > +#define       INT64_MIN  (-9223372036854775807LL-1)
> > +
> > +#define        INT8_MAX  (127)
> > +#define       INT16_MAX  (32767)
> > +#define       INT32_MAX  (2147483647)
> > +#define       INT64_MAX  (9223372036854775807LL)
> > +
> > +#define       UINT8_MAX  (255)
> > +#define      UINT16_MAX  (65535)
> > +#define      UINT32_MAX  (4294967295U)
> > +#define      UINT64_MAX  (18446744073709551615ULL)
> > +
> > +#define  INT_LEAST8_MIN  INT8_MIN
> > +#define INT_LEAST16_MIN  INT16_MIN
> > +#define INT_LEAST32_MIN  INT32_MIN
> > +#define INT_LEAST64_MIN  INT64_MIN
> > +
> > +#define  INT_LEAST8_MAX  INT8_MAX
> > +#define INT_LEAST16_MAX  INT16_MAX
> > +#define INT_LEAST32_MAX  INT32_MAX
> > +#define INT_LEAST64_MAX  INT64_MAX
> > +
> > +#define  UINT_LEAST8_MAX UINT8_MAX
> > +#define UINT_LEAST16_MAX UINT16_MAX
> > +#define UINT_LEAST32_MAX UINT32_MAX
> > +#define UINT_LEAST64_MAX UINT64_MAX
> > +
> > +#define SIZE_MAX         ((__SIZE_TYPE__)(__LONG_MAX__) * 2 + 1)
> 
> Nit: size_t would look nicer than __SIZE_TYPE__.

Thanks, I hesitated.

> Also SSIZE_MAX could be added.

Ah, I hadn't noticed that ssize_t was defined in the file; in this case
I agree it would be better to have it as well.

> > +#define INTPTR_MIN       (-__LONG_MAX__ - 1)
> > +#define INTPTR_MAX       __LONG_MAX__
> > +#define UINTPTR_MAX      (SIZE_MAX)
> 
> Parentheses seem unnecessary.

Indeed.

Thanks!
Willy
  
David Laight Feb. 20, 2023, 9:14 a.m. UTC | #3
From: Willy Tarreau
> Sent: 19 February 2023 18:52
> 
> This commit adds some of the missing integer types to stdint.h and adds
> limit macros (e.g. INTN_{MIN,MAX}).
> 
...
> 
> +typedef   int8_t       int_least8_t;
> +typedef  uint8_t      uint_least8_t;
> +typedef  int16_t      int_least16_t;
> +typedef uint16_t     uint_least16_t;
> +typedef  int32_t      int_least32_t;
> +typedef uint32_t     uint_least32_t;
> +typedef  int64_t      int_least64_t;
> +typedef uint64_t     uint_least64_t;

There are also the 'fast' variants.
Although I'd be tempted to either not define the 'least'
or 'fast' types (or maybe define them all as 'long').
The only code I've ever seen that used uint_fast32_t
got 'confused' when it was 64 bits.
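
For example, a hypothetical sketch of the 'fast' typedefs, either mirroring
the fixed-width types the way the 'least' ones do, or widening the narrow
ones to 'long' as suggested above (not part of this patch):

  typedef   int8_t         int_fast8_t;
  typedef  uint8_t        uint_fast8_t;
  /* ... or, alternatively, promote the narrow 'fast' types to word size: */
  typedef   signed long   int_fast32_t;
  typedef unsigned long  uint_fast32_t;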

...
> +/* limits of integral types */
> +
> +#define        INT8_MIN  (-128)
> +#define       INT16_MIN  (-32767-1)
> +#define       INT32_MIN  (-2147483647-1)
> +#define       INT64_MIN  (-9223372036854775807LL-1)

Those big decimal numbers are difficult to check!
A typo would be unfortunate!
Maybe (eg):
#define	INT64_MIN	(-INT64_MAX - 1)

> +#define        INT8_MAX  (127)
> +#define       INT16_MAX  (32767)
> +#define       INT32_MAX  (2147483647)
> +#define       INT64_MAX  (9223372036854775807LL)
> +
> +#define       UINT8_MAX  (255)
> +#define      UINT16_MAX  (65535)
> +#define      UINT32_MAX  (4294967295U)
> +#define      UINT64_MAX  (18446744073709551615ULL)

None of those need brackets.
Defining in hex would be more readable.
Although all the 'f' get hard to count as well.
Given that the types are defined in the same file, why
not use ~0u and ~0ull for UINT32_MAX and UINT64_MAX.

Should UINT8_MAX and UINT16_MAX be unsigned constants?
(Or even be cast to the corresponding type?)
It doesn't affect arithmetic, but would make a difference
to the over-zealous type checking in the kernel min/max
defines.
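
To make those alternatives concrete, a hypothetical sketch of the variants
discussed above (neither is what the patch uses):

  #define UINT32_MAX  (~0U)             /* no digit strings to miscount; assumes 32-bit int */
  #define UINT64_MAX  (~0ULL)
  #define UINT8_MAX   ((uint8_t)255)    /* typed constants would satisfy strict
                                           type-checking min()/max() macros */
  #define UINT16_MAX  ((uint16_t)65535)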

	David

  
Willy Tarreau Feb. 20, 2023, 2:47 p.m. UTC | #4
Hi David,

On Mon, Feb 20, 2023 at 09:14:04AM +0000, David Laight wrote:
> From: Willy Tarreau
> > Sent: 19 February 2023 18:52
> > 
> > This commit adds some of the missing integer types to stdint.h and adds
> > limit macros (e.g. INTN_{MIN,MAX}).
> > 
> ...
> > 
> > +typedef   int8_t       int_least8_t;
> > +typedef  uint8_t      uint_least8_t;
> > +typedef  int16_t      int_least16_t;
> > +typedef uint16_t     uint_least16_t;
> > +typedef  int32_t      int_least32_t;
> > +typedef uint32_t     uint_least32_t;
> > +typedef  int64_t      int_least64_t;
> > +typedef uint64_t     uint_least64_t;
> 
> There are also the 'fast' variants.
> Although I'd be tempted to either not define the 'least'
> or 'fast' types (or maybe define them all as 'long').
> The only code I've ever seen that used uint_fast32_t
> got 'confused' when it was 64 bits.

Honestly I've never seen either the "least" or the "fast" variants
used and am not at all convinced we need them. But they're not causing
issues either and I'm fine with Vincent adding them.

> ...
> > +/* limits of integral types */
> > +
> > +#define        INT8_MIN  (-128)
> > +#define       INT16_MIN  (-32767-1)
> > +#define       INT32_MIN  (-2147483647-1)
> > +#define       INT64_MIN  (-9223372036854775807LL-1)
> 
> Those big decimal numbers are difficult to check!
> A typo would be unfortunate!

That's also the purpose of the test!

> Maybe (eg):
> #define	INT64_MIN	(-INT64_MAX - 1)

Some would argue that it's less easy to check when you're grepping for
a value. How often have you found yourself bouncing between glibc
include files looking for a definition, for example? I'm not sold on
either choice; it's indeed just a matter of taste in the end.

> > +#define        INT8_MAX  (127)
> > +#define       INT16_MAX  (32767)
> > +#define       INT32_MAX  (2147483647)
> > +#define       INT64_MAX  (9223372036854775807LL)
> > +
> > +#define       UINT8_MAX  (255)
> > +#define      UINT16_MAX  (65535)
> > +#define      UINT32_MAX  (4294967295U)
> > +#define      UINT64_MAX  (18446744073709551615ULL)
> 
> None of those need brackets.

Most likely it was done to be more uniform with the rest above.

> Defining in hex would be more readable.

Sure they would but it's not the same. Hex numbers are usually
considered as neither positive nor negative probably because they're
more commonly used to manipulate bits rather than quantities, and often
they will not trigger warnings on overflows. Look for example:

  $ cat yy.c 
  int a = 0x80000000;
  int b = -0x80000000;
  int c = 2147483648;
  int d = -2147483648;

  int e =  0x80000000 + 1;
  int f =  0x80000000 - 1;
  int g =  2147483648 + 1;
  int h = -2147483648 - 1;
  
  $ clang  -W -Wall -Wextra -c yy.c
  yy.c:3:9: warning: implicit conversion from 'long' to 'int' changes value from 2147483648 to -2147483648 [-Wconstant-conversion]
  int c = 2147483648;
      ~   ^~~~~~~~~~
  yy.c:8:21: warning: implicit conversion from 'long' to 'int' changes value from 2147483649 to -2147483647 [-Wconstant-conversion]
  int g =  2147483648 + 1;
      ~    ~~~~~~~~~~~^~~
  yy.c:9:21: warning: implicit conversion from 'long' to 'int' changes value from -2147483649 to 2147483647 [-Wconstant-conversion]
  int h = -2147483648 - 1;
      ~   ~~~~~~~~~~~~^~~

Notice how the hex ones didn't complain. Just for this I would
rather keep the decimal ones, even if less readable.

> Although all the 'f' get hard to count as well.
> Given that the types are defined in the same file, why
> not use ~0u and ~0ull for UINT32_MAX and UINT64_MAX.

That's what I usually do but here I think it's mostly to stay
consistent across the whole file.

> Should UINT8_MAX and UINT16_MAX be unsigned constants?
> (Or even be cast to the corresponding type?)

Same, better not if we want to keep the compiler's warnings in case
of wrong assignment. Just compare the outputs of:

   char c = UINT8_MAX;

when UINT8_MAX is defined as 255 and 255U. Only the former gives me:

  yy.c:11:11: warning: implicit conversion from 'int' to 'char' changes value from 255 to -1 [-Wconstant-conversion]
  char cc = 255;
       ~~   ^~~

Thus it gives one extra opportunity to spot a typo.

Thanks!
Willy
  
Vincent Dagonneau Feb. 20, 2023, 8:27 p.m. UTC | #5
Hi David,

On Mon, Feb 20, 2023, at 09:47, Willy Tarreau wrote:
> Hi David,
>
> On Mon, Feb 20, 2023 at 09:14:04AM +0000, David Laight wrote:
>> From: Willy Tarreau
>> > Sent: 19 February 2023 18:52
>> > 
>> > This commit adds some of the missing integer types to stdint.h and adds
>> > limit macros (e.g. INTN_{MIN,MAX}).
>> > 
>> ...
>> > 
>> > +typedef   int8_t       int_least8_t;
>> > +typedef  uint8_t      uint_least8_t;
>> > +typedef  int16_t      int_least16_t;
>> > +typedef uint16_t     uint_least16_t;
>> > +typedef  int32_t      int_least32_t;
>> > +typedef uint32_t     uint_least32_t;
>> > +typedef  int64_t      int_least64_t;
>> > +typedef uint64_t     uint_least64_t;
>> 
>> There are also the 'fast' variants.
>> Although I'd be tempted to either not define the 'least'
>> or 'fast' types (or maybe define them all as 'long').
>> The only code I've ever seen that used uint_fast32_t
>> got 'confused' when it was 64 bits.
>
> Honestly I've never seen either the "least" or the "fast" variants
> used and am not at all convinced we need them. But they're not causing
> issues either and I'm fine with Vincent adding them.
>

I have never seen them used in the wild either, but I included them in v5.

>> ...
>> > +/* limits of integral types */
>> > +
>> > +#define        INT8_MIN  (-128)
>> > +#define       INT16_MIN  (-32767-1)
>> > +#define       INT32_MIN  (-2147483647-1)
>> > +#define       INT64_MIN  (-9223372036854775807LL-1)
>> 
>> Those big decimal numbers are difficult to check!
>> A typo would be unfortunate!
>
> That's also the purpose of the test!
>

My rationale for writing the full decimal values in the header file is that we have a check for them in the tests. Furthermore, the numbers are written in decimal in the header but in hexadecimal in the tests, so hopefully at least one of them is right and catches any mistake.
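
As an illustration of that cross-check, a purely hypothetical compile-time
variant could compare the decimal definitions in the header against
independently written hex constants (a sketch, not the actual nolibc-test
code):

  _Static_assert(INT32_MAX  == 0x7fffffff,           "INT32_MAX mismatch");
  _Static_assert(UINT32_MAX == 0xffffffffU,          "UINT32_MAX mismatch");
  _Static_assert(INT64_MAX  == 0x7fffffffffffffffLL, "INT64_MAX mismatch");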

>> Maybe (eg):
>> #define	INT64_MIN	(-INT64_MAX - 1)
>
> Some would argue that it's less easy to check when you're grepping for
> a value. How often have you found yourself bouncing between glibc
> include files looking for a definition, for example? I'm not sold on
> either choice; it's indeed just a matter of taste in the end.
>
>> > +#define        INT8_MAX  (127)
>> > +#define       INT16_MAX  (32767)
>> > +#define       INT32_MAX  (2147483647)
>> > +#define       INT64_MAX  (9223372036854775807LL)
>> > +
>> > +#define       UINT8_MAX  (255)
>> > +#define      UINT16_MAX  (65535)
>> > +#define      UINT32_MAX  (4294967295U)
>> > +#define      UINT64_MAX  (18446744073709551615ULL)
>> 
>> None of those need brackets.
>
> Most likely it was done to be more uniform with the rest above.
>

It is mostly cosmetic, yes.

>> Defining in hex would be more readable.
>
> Sure they would but it's not the same. Hex numbers are usually
> considered as neither positive nor negative probably because they're
> more commonly used to manipulate bits rather than quantities, and often
> they will not trigger warnings on overflows. Look for example:
>
>   $ cat yy.c 
>   int a = 0x80000000;
>   int b = -0x80000000;
>   int c = 2147483648;
>   int d = -2147483648;
>
>   int e =  0x80000000 + 1;
>   int f =  0x80000000 - 1;
>   int g =  2147483648 + 1;
>   int h = -2147483648 - 1;
>  
>   $ clang  -W -Wall -Wextra -c yy.c
>   yy.c:3:9: warning: implicit conversion from 'long' to 'int' changes 
> value from 2147483648 to -2147483648 [-Wconstant-conversion]
>   int c = 2147483648;
>       ~   ^~~~~~~~~~
>   yy.c:8:21: warning: implicit conversion from 'long' to 'int' changes 
> value from 2147483649 to -2147483647 [-Wconstant-conversion]
>   int g =  2147483648 + 1;
>       ~    ~~~~~~~~~~~^~~
>   yy.c:9:21: warning: implicit conversion from 'long' to 'int' changes 
> value from -2147483649 to 2147483647 [-Wconstant-conversion]
>   int h = -2147483648 - 1;
>       ~   ~~~~~~~~~~~~^~~
>
> Notice how the hex ones didn't complain. Just for this I would
> rather keep the decimal ones, even if less readable.
>

Additionally, as previously mentioned, the tests are based on the hex representation, as it is indeed easier to read.

>> Although all the 'f' get hard to count as well.
>> Given that the types are defined in the same file, why
>> not use ~0u and ~0ull for UINT32_MAX and UINT64_MAX.
>
> That's what I usually do but here I think it's mostly to stay
> consistent across the whole file.
>

Indeed, it is also mostly cosmetic.

>> Should UINT8_MAX and UINT16_MAX be unsigned constants?
>> (Or even be cast to the corresponding type?)
>
> Same, better not if we want to keep the compiler's warnings in case
> of wrong assignment. Just compare the outputs of:
>
>    char c = UINT8_MAX;
>
> when UINT8_MAX is defined as 255 and 255U. Only the former gives me:
>
>   yy.c:11:11: warning: implicit conversion from 'int' to 'char' changes 
> value from 255 to -1 [-Wconstant-conversion]
>   char cc = 255;
>        ~~   ^~~
>
> Thus it gives one extra opportunity to spot a typo.
>
> Thanks!
> Willy

Thank you for the review!
  

Patch

diff --git a/tools/include/nolibc/stdint.h b/tools/include/nolibc/stdint.h
index 4ba264031df9..5f1adf9316ca 100644
--- a/tools/include/nolibc/stdint.h
+++ b/tools/include/nolibc/stdint.h
@@ -21,4 +21,55 @@  typedef unsigned long     uintptr_t;
 typedef   signed long      intptr_t;
 typedef   signed long     ptrdiff_t;
 
+typedef   int8_t       int_least8_t;
+typedef  uint8_t      uint_least8_t;
+typedef  int16_t      int_least16_t;
+typedef uint16_t     uint_least16_t;
+typedef  int32_t      int_least32_t;
+typedef uint32_t     uint_least32_t;
+typedef  int64_t      int_least64_t;
+typedef uint64_t     uint_least64_t;
+
+typedef  int64_t           intmax_t;
+typedef uint64_t          uintmax_t;
+
+/* limits of integral types */
+
+#define        INT8_MIN  (-128)
+#define       INT16_MIN  (-32767-1)
+#define       INT32_MIN  (-2147483647-1)
+#define       INT64_MIN  (-9223372036854775807LL-1)
+
+#define        INT8_MAX  (127)
+#define       INT16_MAX  (32767)
+#define       INT32_MAX  (2147483647)
+#define       INT64_MAX  (9223372036854775807LL)
+
+#define       UINT8_MAX  (255)
+#define      UINT16_MAX  (65535)
+#define      UINT32_MAX  (4294967295U)
+#define      UINT64_MAX  (18446744073709551615ULL)
+
+#define  INT_LEAST8_MIN  INT8_MIN
+#define INT_LEAST16_MIN  INT16_MIN
+#define INT_LEAST32_MIN  INT32_MIN
+#define INT_LEAST64_MIN  INT64_MIN
+
+#define  INT_LEAST8_MAX  INT8_MAX
+#define INT_LEAST16_MAX  INT16_MAX
+#define INT_LEAST32_MAX  INT32_MAX
+#define INT_LEAST64_MAX  INT64_MAX
+
+#define  UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+#define SIZE_MAX         ((__SIZE_TYPE__)(__LONG_MAX__) * 2 + 1)
+#define INTPTR_MIN       (-__LONG_MAX__ - 1)
+#define INTPTR_MAX       __LONG_MAX__
+#define UINTPTR_MAX      (SIZE_MAX)
+#define PTRDIFF_MIN      INTPTR_MIN
+#define PTRDIFF_MAX      INTPTR_MAX
+
 #endif /* _NOLIBC_STDINT_H */